From e615188bb175fa7e815db6a24d7c20fde9c17e77 Mon Sep 17 00:00:00 2001
From: lnk
Date: Tue, 9 Dec 2025 16:29:55 +0800
Subject: [PATCH] fix memleak

---
 LFtid1056/client2.cpp              | 67 +++++++++++++++++++++++++++++-
 LFtid1056/cloudfront/code/main.cpp |  4 +-
 2 files changed, 67 insertions(+), 4 deletions(-)

diff --git a/LFtid1056/client2.cpp b/LFtid1056/client2.cpp
index 4add88e..9b1e636 100644
--- a/LFtid1056/client2.cpp
+++ b/LFtid1056/client2.cpp
@@ -418,7 +418,7 @@ void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
 }
 
 /* Data write callback */
-void on_write(uv_write_t* req, int status) {
+/*void on_write(uv_write_t* req, int status) {
     ClientContext* ctx = static_cast<ClientContext*>(req->handle->data);
 
     if (status < 0) {
@@ -428,6 +428,23 @@ void on_write(uv_write_t* req, int status) {
     std::cout << "on_write: " << ctx->device_info.mac << " down!" << std::endl;
     delete[] static_cast<char*>(req->data); // free the send data buffer
     delete req;                             // free the write request
+}*/
+
+//lnk20251209
+void on_write(uv_write_t* req, int status) {
+    ClientContext* ctx = static_cast<ClientContext*>(req->handle->data);
+
+    if (status < 0) {
+        std::cerr << "[Device " << ctx->device_info.device_id
+                  << "] SEND ERROR: " << uv_strerror(status) << std::endl;
+    }
+    std::cout << "on_write: " << ctx->device_info.mac << " down!" << std::endl;
+
+    // free the send data buffer correctly
+    auto* data_vec = static_cast<std::vector<unsigned char>*>(req->data);
+    delete data_vec; // ✅ matches new std::vector<unsigned char>
+
+    delete req; // ✅ matches new uv_write_t
 }
 
 /* Timed send callback */
@@ -541,7 +558,7 @@ void send_binary_data(ClientContext* ctx, const unsigned char* data, size_t data
 }
 
 // New function: safely send data on the event-loop thread
-void safe_send_binary_data(ClientContext* ctx, std::vector<unsigned char> data) {
+/*void safe_send_binary_data(ClientContext* ctx, std::vector<unsigned char> data) {
     uv_work_t* req = new uv_work_t;
     req->data = new std::pair<ClientContext*, std::vector<unsigned char>>(ctx, std::move(data));
 
@@ -570,7 +587,53 @@ void safe_send_binary_data(ClientContext* ctx, std::vector<unsigned char> data)
         delete pair;
         delete req;
     });
+}*/
+
+
+//lnk20251209
+void safe_send_binary_data(ClientContext* ctx, std::vector<unsigned char> data) {
+    uv_work_t* req = new uv_work_t;
+    req->data = new std::pair<ClientContext*, std::vector<unsigned char>>(ctx, std::move(data));
+
+    uv_queue_work(ctx->loop, req,
+        [](uv_work_t* ) { //req
+            // no real work is done on the worker thread
+        },
+        [](uv_work_t* req, int status) {
+            auto* pair = static_cast<std::pair<ClientContext*, std::vector<unsigned char>>*>(req->data);
+            ClientContext* ctx = pair->first;
+            std::vector<unsigned char>& original_data = pair->second;
+
+            if (ctx->state == ConnectionState::CONNECTED) {
+                // copy the data into a vector owned by write_req itself
+                auto* send_buf = new std::vector<unsigned char>(original_data);
+
+                uv_buf_t buf = uv_buf_init(
+                    reinterpret_cast<char*>(send_buf->data()),
+                    send_buf->size()
+                );
+
+                uv_write_t* write_req = new uv_write_t;
+                write_req->data = send_buf; // deleted in on_write
+
+                int ret = uv_write(write_req, (uv_stream_t*)&ctx->client, &buf, 1, on_write);
+                if (ret < 0) {
+                    // error handling: uv_write did not take ownership, so free here
+                    std::cerr << "[Device " << ctx->device_info.device_id
+                              << "] uv_write error: " << uv_strerror(ret) << std::endl;
+                    delete send_buf;
+                    delete write_req;
+                }
+            }
+
+            // pair can safely be deleted here; the write uses the send_buf copy
+            delete pair;
+            delete req;
+        }
+    );
 }
+
+
 /* Connection close callback */
 void on_close(uv_handle_t* handle) {
     ClientContext* ctx = static_cast<ClientContext*>(handle->data);
diff --git a/LFtid1056/cloudfront/code/main.cpp b/LFtid1056/cloudfront/code/main.cpp
index b138f5e..8c933a9 100644
--- a/LFtid1056/cloudfront/code/main.cpp
+++ b/LFtid1056/cloudfront/code/main.cpp
@@ -669,13 +669,13 @@ void Front::mqproducerThread()
 }
 
 extern thread_info_t thread_info[THREAD_CONNECTIONS];
 
-void cleanup_args(ThreadArgs* args) {
+/*void cleanup_args(ThreadArgs* args) {
     for (int i = 0; i < args->argc; ++i) {
         free(args->argv[i]); // allocated by strdup
     }
     delete[] args->argv;
     delete args;
-}
+}*/
 
 void* cloudfrontthread(void* arg) {
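
Note (reviewer sketch, not part of the patch): the fix above restores the rule
that every new has exactly one matching delete across the async write boundary,
at the cost of two heap allocations per send (write_req plus send_buf). A common
way to make that contract harder to get wrong is to bundle the uv_write_t and
its payload in a single heap object, so there is only one allocation to free in
the completion callback. A minimal sketch against the same libuv 1.x API used
above; WriteRequest, send_copy, and on_write_done are illustrative names, not
anything defined in client2.cpp:

#include <uv.h>
#include <cstdio>
#include <vector>

// One allocation owns both the write request and the bytes being written.
struct WriteRequest {
    uv_write_t req;                   // libuv write request
    std::vector<unsigned char> data;  // owns the bytes until the callback runs
};

static void on_write_done(uv_write_t* req, int status) {
    // req->data was set to point back at the enclosing WriteRequest.
    auto* wr = static_cast<WriteRequest*>(req->data);
    if (status < 0)
        std::fprintf(stderr, "write error: %s\n", uv_strerror(status));
    delete wr;  // one delete frees the request and the buffer together
}

static int send_copy(uv_stream_t* stream, const unsigned char* bytes, size_t len) {
    auto* wr = new WriteRequest{uv_write_t{}, std::vector<unsigned char>(bytes, bytes + len)};
    wr->req.data = wr;  // let the callback find the owning object
    uv_buf_t buf = uv_buf_init(reinterpret_cast<char*>(wr->data.data()),
                               static_cast<unsigned int>(wr->data.size()));
    int ret = uv_write(&wr->req, stream, &buf, 1, on_write_done);
    if (ret < 0)
        delete wr;  // uv_write rejected the request and never took ownership
    return ret;
}

Either way, the invariant the patch re-establishes is the same: whoever
allocated the send buffer frees it exactly once, in the write callback on
success or immediately when uv_write fails.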