log_producer_manager.c
#include "log_producer_manager.h"
#include "inner_log.h"
//#include "md5.h"
#include "sds.h"

// changed from 100ms to 1000s to reduce wake-ups when the app switches to the background
#define LOG_PRODUCER_FLUSH_INTERVAL_MS 100
#define LOG_PRODUCER_READ_INTERVAL_MS 100

#define MAX_LOGGROUP_QUEUE_SIZE 1024
#define MIN_LOGGROUP_QUEUE_SIZE 128

#define MAX_MANAGER_FLUSH_COUNT 100 // 10 ms * 100
#define MAX_SENDER_FLUSH_COUNT 100  // 10 ms * 100

log_producer_manager* g_producer_manager[10];
int producer_manager_num = 0;

#ifdef WIN32
DWORD WINAPI log_producer_send_thread(LPVOID param);
#else
void* log_producer_send_thread(void* param);
#endif
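
/*
 * Pipeline overview (as implemented in this file):
 *   - log_producer_flush_thread            : drains each manager's loggroup_queue and pushes the
 *                                            serialized first log of every group into the share queue.
 *   - log_producer_write_persistent_thread : drains the share queue and saves the entries into the
 *                                            persistent DB in batched transactions.
 *   - log_producer_read_persistent_thread  : reads batches back from the DB, serializes them
 *                                            (optionally lz4-compressed) and queues them as send params.
 *   - log_producer_send_thread             : declared above, defined elsewhere; consumes sender_data_queue.
 */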
void _try_flush_loggroup(log_producer_manager* producer_manager)
{
    int loggroup_size;
    int rst;
    int32_t now_time = time(NULL);

    CS_ENTER(producer_manager->lock);
    if (producer_manager->builder != NULL)
    {
        log_group_builder* builder = producer_manager->builder;
        producer_manager->builder = NULL;
        CS_LEAVE(producer_manager->lock);

        loggroup_size = builder->loggroup_size;
        rst = log_queue_push(producer_manager->loggroup_queue, builder);
        //aos_debug_log((LB, "try push loggroup to flusher, size : %d, status : %d", (int)loggroup_size, rst));
        if (rst != 0)
        {
            aos_error_log((LB, "try push loggroup to flusher failed, force drop this log group, error code : %d", rst));
            if (producer_manager->send_done_function != NULL)
            {
                producer_manager->send_done_function(LOG_PRODUCER_DROP_ERROR, loggroup_size, 0,
                    NULL, "try push loggroup to flusher failed, force drop this log group", NULL, producer_manager->user_param);
            }
            log_group_destroy(builder);
        }
        else
        {
            producer_manager->totalBufferSize += loggroup_size;
            COND_SIGNAL(producer_manager->triger_cond);
        }
    }
    else
    {
        CS_LEAVE(producer_manager->lock);
    }
}
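
/*
 * Read side of the persistence pipeline: periodically pulls a batch of logs out of the
 * persistent DB, attaches terminal number / SN, serializes the group (lz4 when
 * compressType == 1), marks the records as LOG_DB_STATUS_SENDING and hands the package
 * to the sender. The sleep interval (sleepMs) adapts to how full each batch is.
 */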
#ifdef WIN32
// read logs back from the DB
DWORD WINAPI log_producer_read_persistent_thread(LPVOID param)
#else
// read logs back from the DB
void* log_producer_read_persistent_thread(void* param)
#endif
{
    int i = 0;
    int forceClean = 0;
    int32_t sleepMs = LOG_PRODUCER_READ_INTERVAL_MS;
    log_group_builder* builder = NULL;
    log_producer_manager* producer_manager = (log_producer_manager*)param;
    log_persistent_manager* persistent_manager;
    log_producer_config* config = producer_manager->producer_config;

    aos_info_log((LB, "start run read persistent thread"));
    while (producer_manager->shutdown == 0) {
        i = 0;
        for (i = 0; i < sleepMs; i += LOG_PRODUCER_READ_INTERVAL_MS)
        {
#ifdef WIN32
            Sleep(LOG_PRODUCER_READ_INTERVAL_MS);
#else
            usleep(LOG_PRODUCER_READ_INTERVAL_MS * 1000);
#endif
        }
        do {
            // if send queue is full, skip pack and send data
            CS_ENTER(producer_manager->lock);
            if (producer_manager->send_param_queue_write - producer_manager->send_param_queue_read >= producer_manager->send_param_queue_size)
            {
                aos_debug_log((LB, "send queue is full, send queue write : %d, send queue read : %d, skip pack and send data",
                    producer_manager->send_param_queue_write, producer_manager->send_param_queue_read));
                CS_LEAVE(producer_manager->lock);
                sleepMs = LOG_PRODUCER_READ_INTERVAL_MS;
                break;
            }
            CS_LEAVE(producer_manager->lock);

            if (config->endpoint == NULL ||
                config->skyeye_user_topic == NULL ||
                config->skyeye_sys_topic == NULL ||
                config->beidou_topic == NULL ||
                config->business_user_topic == NULL ||
                config->business_sys_topic == NULL ||
                config->vtmweb_topic == NULL) {
                aos_debug_log((LB, "endpoint is NULL, skip pack and send data"));
                sleepMs = LOG_PRODUCER_READ_INTERVAL_MS;
                break;
            }
            // get send data from persistent storage
            persistent_manager = producer_manager->persistent_manager;
            if (persistent_manager != NULL) {
                CS_ENTER(persistent_manager->lock);
                builder = log_persistent_manager_get_log(persistent_manager); // read logs from the DB, up to logCountPerPackage entries
                CS_LEAVE(persistent_manager->lock);
                if (builder != NULL) {
                    int i = 0;
                    int n_logs = 0;
                    char modular[MAX_PATH] = { 0 };
                    char uuid[MAX_LOG_COUNT][MAX_UUID_LEN] = { 0 };
                    lz4_log_buf* lz4_buf = NULL;
                    // cache uuid, n_logs and modular before the builder is destroyed
                    for (i = 0; i < builder->grp->n_logs; i++) {
                        strcpy(uuid[i], builder->grp->logs[i].uuid);
                    }
                    n_logs = builder->grp->n_logs;
                    if (n_logs > 0) {
                        sleepMs = config->logCountPerPackage / n_logs * LOG_PRODUCER_READ_INTERVAL_MS;
                    }
                    strcpy(modular, builder->modular);
                    aos_debug_log((LB, "get from persistent log count : %d.", n_logs));
                    // process data
                    CS_ENTER(producer_manager->lock);
                    producer_manager->totalBufferSize -= builder->loggroup_size;
                    CS_LEAVE(producer_manager->lock);
                    // append terminalNo and SN; this is quite inefficient
                    if (config->skyeyeTerminalNo != NULL) {
                        add_log_terminal_no(builder, config->skyeyeTerminalNo);
                    }
                    if (config->skyeyeSn != NULL) {
                        static char local_ip_str[64] = "0.0.0.0";
                        static long ipInit = 0;
                        // refresh the local IP only once every 1000 iterations
                        if (ipInit % 1000 == 0)
                            GetLocalIP(local_ip_str);
                        ipInit++;
                        add_log_terminal_sn_other(builder, config->skyeyeSn, local_ip_str);
                    }
                    clear_errJson_msg(builder);
                    // check compress type
                    if (config->compressType == 1) // compress or not
                        lz4_buf = serialize_to_proto_buf_with_malloc_lz4(builder);
                    else
                        lz4_buf = serialize_to_proto_buf_with_malloc_no_lz4(builder);
                    if (lz4_buf == NULL)
                    {
                        aos_error_log((LB, "serialize loggroup to proto buf with lz4 failed"));
                        if (producer_manager->send_done_function)
                        {
                            producer_manager->send_done_function(LOG_PRODUCER_DROP_ERROR, builder->loggroup_size, 0,
                                NULL, "serialize loggroup to proto buf with lz4 failed", NULL, producer_manager->user_param);
                        }
                        if (producer_manager->uuid_send_done_function != NULL)
                        {
                            producer_manager->uuid_send_done_function(LOG_PRODUCER_INVALID,
                                builder->loggroup_size, 0, NULL, "invalid send param, magic num not found", NULL,
                                producer_manager->uuid_user_param, builder->grp->n_logs, uuid, builder->modular); // after send_done the data for these uuids is deleted
                        }
                    }
                    else
                    {
                        log_producer_send_param* send_param;
                        CS_ENTER(producer_manager->lock);
                        producer_manager->totalBufferSize += lz4_buf->length; // final length after extra fields and optional compression
                        send_param = create_log_producer_send_param(config, producer_manager, lz4_buf, builder);
                        /*
                        int i = 0;
                        for (i = 0; i < send_param->log_buf->n_logs; i++)
                        {
                            char uuid[MAX_UUID_LEN];
                            memset(uuid, 0, MAX_UUID_LEN);
                            memcpy(uuid, send_param->log_buf->uuid[i], MAX_UUID_LEN);
                            send_log_data("http://127.0.0.1:9000/read_uuid", uuid);
                        }
                        */
                        producer_manager->send_param_queue[producer_manager->send_param_queue_write++ % producer_manager->send_param_queue_size] = send_param; // enqueue for sending
                        CS_LEAVE(producer_manager->lock);
                    }
                    log_group_destroy(builder);
                    // set log status to sending
                    CS_ENTER(persistent_manager->lock);
                    log_persistent_manager_updata_log_status(persistent_manager, modular, n_logs, uuid, LOG_DB_STATUS_SENDING); // mark these records as sending
                    CS_LEAVE(persistent_manager->lock);
                    /** always 24 each time [Gifur@2022720] */
                    persistent_manager->SendLogCount += n_logs;
                    aos_debug_log((LB, "get from persistent, send log count : %d.", persistent_manager->SendLogCount));
                    CS_ENTER(persistent_manager->lock);
                    if (forceClean || persistent_manager->SendLogCount - persistent_manager->LastCleanLogCount > 1000) { // clean up after every 1000 sent logs
                        log_persistent_manager_clean(persistent_manager, modular);
                        persistent_manager->LastCleanLogCount = persistent_manager->SendLogCount;
                        forceClean = 0;
                    }
                    else
                    {
                        int count = db_get_count(persistent_manager->db_manager);
                        if (count > (persistent_manager->config->maxPersistentLogCount + 1000))
                        {
                            log_persistent_manager_clean(persistent_manager, modular);
                            persistent_manager->LastCleanLogCount = persistent_manager->SendLogCount;
                        }
                    }
                    CS_LEAVE(persistent_manager->lock);
                    continue;
                }
                else {
                    if ((producer_manager->send_param_queue_write - producer_manager->send_param_queue_read) < producer_manager->send_param_queue_size)
                    {
                        sleepMs = 1200 * LOG_PRODUCER_READ_INTERVAL_MS; // send every 120 s if a full package cannot be collected
                        forceClean = 1;
                    }
                    else
                        sleepMs = 10 * LOG_PRODUCER_READ_INTERVAL_MS; // normal pause, wait for the send thread to drain the queue
                    //aos_debug_log((LB, "get from persistent is null."));
                    break;
                }
            }
            break;
        } while (1);
        // send data
        CS_ENTER(producer_manager->lock);
        if (producer_manager->send_threads != NULL)
        { // spshell.exe, with send threads
            // if send thread count > 0, we just push send_param to the sender queue
            while (producer_manager->send_param_queue_write > producer_manager->send_param_queue_read && !log_queue_isfull(producer_manager->sender_data_queue))
            {
                log_producer_send_param* send_param = producer_manager->send_param_queue[producer_manager->send_param_queue_read++ % producer_manager->send_param_queue_size];
                // push always succeeds
                log_queue_push(producer_manager->sender_data_queue, send_param); // hand over to the sender queue
            }
        }
        else if (producer_manager->send_param_queue_write > producer_manager->send_param_queue_read)
        {
            // sphost.exe, no send threads
            // if there is no sender thread, send this packet in the flush thread
            log_producer_send_param* send_param = producer_manager->send_param_queue[producer_manager->send_param_queue_read++ % producer_manager->send_param_queue_size];
            log_producer_send_data(send_param);
        }
        CS_LEAVE(producer_manager->lock);
    }
    aos_info_log((LB, "exit read persistent thread"));
    return 0;
}
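
/*
 * Write side of the persistence pipeline: woken by triger_cond or every
 * LOG_PRODUCER_FLUSH_INTERVAL_MS, it drains the share queue into the DB inside a single
 * transaction, discarding entries once the DB exceeds maxPersistentLogCount + 2000.
 */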
#ifdef WIN32
DWORD WINAPI log_producer_write_persistent_thread(LPVOID param)
#else
// take data from the share queue and write it to the DB
void* log_producer_write_persistent_thread(void* param)
#endif
{
    int lens;
    int first_process = 1;
    int32_t now;
    int32_t last_write_time = time(NULL);
    shareq_frame share_frame = { 0 };
    serialize_buf buf = { 0 };
    int t_count = 0;
    int discardMsg = 0;
    log_producer_manager* producer_manager = (log_producer_manager*)param;
    log_persistent_manager* persistent_manager;
    share_frame.data = &buf;

    aos_info_log((LB, "start run write persistent thread"));
    while (producer_manager->shutdown == 0)
    {
        int count = 0;
        CS_ENTER(producer_manager->lock);
        COND_WAIT_TIME(producer_manager->triger_cond, producer_manager->lock, LOG_PRODUCER_FLUSH_INTERVAL_MS);
        CS_LEAVE(producer_manager->lock);

        persistent_manager = producer_manager->persistent_manager;
        if (persistent_manager == NULL) {
            continue;
        }
        // run recovery once, on the first pass
        if (producer_manager->producer_config->enableGuarder && first_process) {
            int recoverRst;
            CS_ENTER(persistent_manager->lock);
            recoverRst = log_persistent_manager_recover(persistent_manager);
            if (recoverRst != 0)
            {
                aos_error_log((LB, "recover log persistent manager failed, result %d",
                    recoverRst));
            }
            else
            {
                aos_info_log((LB, "recover log persistent manager success"));
            }
            CS_LEAVE(persistent_manager->lock);
            first_process = 0;
        }
        // huchen add: the transaction must not be interrupted by another thread; the lock guarantees it runs to completion
        CS_ENTER(persistent_manager->lock);
        lens = ClibsharequeueGetLens(producer_manager->share_queue);
        now = time(NULL);
        if ((lens < MAX_SHAREQUEUE_LENS / 4) && (now - last_write_time < 3)) { // run a transaction at most once every 3 s
            CS_LEAVE(persistent_manager->lock);
            continue;
        }
        aos_debug_log((LB, "get from share queue log count : %d.", lens));
        t_count = db_get_count(persistent_manager->db_manager);
        if (t_count > (persistent_manager->config->maxPersistentLogCount + 2000)) {
            discardMsg = 1;
            aos_warn_log((LB, "give up the db content, bcz db count %d exceeds max count %d", t_count, persistent_manager->config->maxPersistentLogCount));
        }
        else {
            discardMsg = 0;
        }
        // try to drain the share queue
        do
        {
            int result;
            // get send data from the share buffer
            memset(share_frame.data, 0, sizeof(serialize_buf));
            result = ClibsharequeueGetAndDel(producer_manager->share_queue, &share_frame);
            if (result == 1)
            {
                if (discardMsg)
                    continue;
                if (count == 0) {
                    result = log_persistent_manager_transaction_begin(persistent_manager);
                    if (result != LOG_PRODUCER_OK) {
                        aos_warn_log((LB, "get from share queue, uuid %s begin failed.", buf.uuid));
                    }
                    result = log_persistent_manager_save_log(persistent_manager, share_frame.data);
                    if (result != LOG_PRODUCER_OK) {
                        aos_warn_log((LB, "get from share queue, uuid %s save failed.", buf.uuid));
                    }
                }
                else {
                    result = log_persistent_manager_save_log(persistent_manager, share_frame.data);
                    if (result != LOG_PRODUCER_OK) {
                        aos_warn_log((LB, "get from share queue, uuid %s save failed.", buf.uuid));
                    }
                }
                count++;
                continue;
            }
            if (count > 0) {
                result = log_persistent_manager_transaction_commit(persistent_manager);
                if (result != LOG_PRODUCER_OK) {
                    aos_warn_log((LB, "get from share queue, uuid %s commit failed.", buf.uuid));
                }
            }
            last_write_time = time(NULL);
            //aos_debug_log((LB, "get from share queue is null."));
            break;
        } while (1);
        CS_LEAVE(persistent_manager->lock);
        // if there is no job, check the current loggroup
        _try_flush_loggroup(producer_manager);
    }
    aos_info_log((LB, "exit write persistent thread"));
    return 0;
}
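
/*
 * Global flusher: a single thread (created once in create_log_producer_manager) that loops
 * over every registered manager in g_producer_manager, moving finished log groups from the
 * in-memory loggroup_queue into the shared queue consumed by the write-persistent thread.
 */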
#ifdef WIN32
DWORD WINAPI log_producer_flush_thread(LPVOID param) // flush the log to spshell
#else
void* log_producer_flush_thread(void* param)
#endif
{
    while (1)
    {
        int i = 0;
        for (i = 0; i < producer_manager_num; i++)
        {
            void* data = NULL;
            shareq_frame frame = { 0 };
            serialize_buf buf = { 0 };
            log_producer_manager* producer_manager = (log_producer_manager*)g_producer_manager[i];
            log_persistent_manager* persistent_manager;
            frame.data = &buf;
            aos_info_log((LB, "start run flusher thread"));
            persistent_manager = producer_manager->persistent_manager;
            if (persistent_manager == NULL || producer_manager->shutdown == 1) {
                continue;
            }
            do
            {
                // get send data from buffer
                data = log_queue_trypop(producer_manager->loggroup_queue);
                if (data != NULL)
                {
                    int insert_result = 0;
                    log_group_builder* builder = (log_group_builder*)data;
                    memset(frame.data, 0, sizeof(serialize_buf));
                    serialize_to_buf(&builder->grp->logs[0], frame.data);
                    frame.size = sizeof(serialize_buf);
                    CS_ENTER(persistent_manager->lock);
                    insert_result = ClibsharequeueInsert(producer_manager->share_queue, &frame, 0);
                    CS_LEAVE(persistent_manager->lock);
                    if (insert_result == 0) {
                        aos_warn_log((LB, "get from queue, uuid %s insert failed.", buf.uuid));
                    }
                    log_group_destroy(builder);
                    continue;
                }
                //aos_debug_log((LB, "get from queue is null."));
                break;
            } while (1);
            // if there is no job, check the current loggroup
            _try_flush_loggroup(producer_manager);
        }
#ifdef WIN32
        Sleep(1000);
#else
        usleep(1000 * 1000);
#endif
    }
    aos_info_log((LB, "exit flusher thread"));
    return 0;
}
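
/*
 * Queue sizing below follows base_queue_size = maxBufferBytes / (logBytesPerPackage + 1) + 10,
 * clamped to [MIN_LOGGROUP_QUEUE_SIZE, MAX_LOGGROUP_QUEUE_SIZE]. As a purely illustrative
 * example (values are hypothetical, not SDK defaults): maxBufferBytes = 1 MB and
 * logBytesPerPackage = 4 KB give 1048576 / 4097 + 10 = 265, which stays within [128, 1024];
 * send_param_queue is then sized at twice that.
 */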
log_producer_manager* create_log_producer_manager(log_producer_config* producer_config)
{
    int32_t base_queue_size;
    log_producer_manager* producer_manager;
    static int isFlushThreadExist = 0;

    producer_manager = g_producer_manager[producer_manager_num] = (log_producer_manager*)malloc(sizeof(log_producer_manager));
    memset(producer_manager, 0, sizeof(log_producer_manager));
    producer_manager->producer_config = producer_config;

    base_queue_size = producer_config->maxBufferBytes / (producer_config->logBytesPerPackage + 1) + 10;
    if (base_queue_size < MIN_LOGGROUP_QUEUE_SIZE)
    {
        base_queue_size = MIN_LOGGROUP_QUEUE_SIZE;
    }
    else if (base_queue_size > MAX_LOGGROUP_QUEUE_SIZE)
    {
        base_queue_size = MAX_LOGGROUP_QUEUE_SIZE;
    }
    producer_manager->loggroup_queue = log_queue_create(base_queue_size);
    if (producer_config->useMemoryType == 0) {
        producer_manager->share_queue = ClibsharequeueCreate2("RVC_LOG_SDK_QUEUE");
    } else {
        producer_manager->share_queue = ClibsharequeueCreate3("RVC_LOG_SDK_QUEUE", 1);
    }
    producer_manager->send_param_queue_size = base_queue_size * 2;
    producer_manager->send_param_queue = malloc(sizeof(log_producer_send_param*) * producer_manager->send_param_queue_size);
    producer_manager->triger_cond = CreateCond();
    producer_manager->lock = CreateCriticalSection();
    producer_manager_num++;

    if (!isFlushThreadExist)
    {
        isFlushThreadExist = 1;
        THREAD_INIT(producer_manager->flush_thread, log_producer_flush_thread, NULL); // as things stand this flush thread cannot be dropped, because it is bound to producer_config
    }
    // huchen add: only the Guarder gets read, write and send threads
    if (producer_config->enableGuarder) { // create the upload part
        aos_debug_log((LB, "to create send&read&write thread %d", producer_config->sendThreadCount));
        if (producer_config->sendThreadCount > 0)
        {
            int32_t threadId;
            producer_manager->multi_thread_send_count = 0;
            producer_manager->send_threads = (THREAD*)malloc(sizeof(THREAD) * producer_config->sendThreadCount);
            producer_manager->sender_data_queue = log_queue_create(base_queue_size * 2);
            threadId = 0;
            for (; threadId < producer_manager->producer_config->sendThreadCount; ++threadId)
            {
                THREAD_INIT(producer_manager->send_threads[threadId], log_producer_send_thread, producer_manager);
            }
            producer_manager->read_thread = (THREAD*)malloc(sizeof(THREAD));
            aos_debug_log((LB, "to create read thread"));
            THREAD_INIT(producer_manager->read_thread, log_producer_read_persistent_thread, producer_manager);
            producer_manager->write_thread = (THREAD*)malloc(sizeof(THREAD));
            aos_debug_log((LB, "to create write thread"));
            THREAD_INIT(producer_manager->write_thread, log_producer_write_persistent_thread, producer_manager);
        }
    }
    aos_debug_log((LB, "create log producer manager, base_queue_size %d, name: %s", base_queue_size, producer_manager->producer_config->skyeyeEntityName));
    return producer_manager;
}
void _push_last_loggroup(log_producer_manager* manager)
{
    log_group_builder* builder;
    CS_ENTER(manager->lock);
    builder = manager->builder;
    manager->builder = NULL;
    if (builder != NULL)
    {
        int32_t status;
        size_t loggroup_size = builder->loggroup_size;
        aos_debug_log((LB, "try push loggroup to flusher, size : %d", (int)builder->loggroup_size));
        status = log_queue_push(manager->loggroup_queue, builder);
        if (status != 0)
        {
            aos_error_log((LB, "try push loggroup to flusher failed, force drop this log group, error code : %d", status));
            log_group_destroy(builder);
        }
        else
        {
            manager->totalBufferSize += loggroup_size;
            COND_SIGNAL(manager->triger_cond);
        }
    }
    CS_LEAVE(manager->lock);
}
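
/*
 * Shutdown sequence: push any partially built loggroup, wait (bounded by the configured
 * destroy timeouts) for the queues to drain, then set shutdown, wake the write thread via
 * triger_cond, join the worker threads and release the queues, condition and lock.
 */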
void destroy_log_producer_manager(log_producer_manager* manager)
{
    int32_t total_wait_count;
    int waitCount = 0;
    // when destroy instance, flush last loggroup
    _push_last_loggroup(manager);
    aos_info_log((LB, "flush out producer loggroup begin"));
    total_wait_count = manager->producer_config->destroyFlusherWaitTimeoutSec > 0 ? manager->producer_config->destroyFlusherWaitTimeoutSec * 100 : MAX_MANAGER_FLUSH_COUNT;
    total_wait_count += manager->producer_config->destroySenderWaitTimeoutSec > 0 ? manager->producer_config->destroySenderWaitTimeoutSec * 100 : MAX_SENDER_FLUSH_COUNT;
#ifdef WIN32
    Sleep(10);
#else
    usleep(10 * 1000);
#endif
    while (log_queue_size(manager->loggroup_queue) > 0 ||
        manager->send_param_queue_write - manager->send_param_queue_read > 0 ||
        (manager->sender_data_queue != NULL && log_queue_size(manager->sender_data_queue) > 0))
    {
#ifdef WIN32
        Sleep(10);
#else
        usleep(10 * 1000);
#endif
        if (++waitCount == total_wait_count)
        {
            break;
        }
    }
    if (waitCount == total_wait_count)
    {
        aos_error_log((LB, "try flush out producer loggroup error, force exit, now loggroup %d", (int)(log_queue_size(manager->loggroup_queue))));
    }
    else
    {
        aos_info_log((LB, "flush out producer loggroup success"));
    }
    manager->shutdown = 1;
    // destroy root resources
    COND_SIGNAL(manager->triger_cond);
    if (manager->write_thread != NULL)
    {
        aos_info_log((LB, "join write thread begin"));
        THREAD_JOIN(manager->write_thread);
        free(manager->write_thread);
        manager->write_thread = NULL;
        aos_info_log((LB, "join write thread success"));
    }
    if (manager->read_thread != NULL)
    {
        aos_info_log((LB, "join read thread begin"));
        THREAD_JOIN(manager->read_thread);
        free(manager->read_thread);
        manager->read_thread = NULL;
        aos_info_log((LB, "join read thread success"));
    }
    aos_info_log((LB, "join flush thread begin"));
    THREAD_JOIN(manager->flush_thread);
    aos_info_log((LB, "join flush thread success"));
    if (manager->send_threads != NULL)
    {
        int32_t threadId;
        aos_info_log((LB, "join sender thread pool begin"));
        threadId = 0;
        for (; threadId < manager->producer_config->sendThreadCount; ++threadId)
        {
            THREAD_JOIN(manager->send_threads[threadId]);
        }
        free(manager->send_threads);
        manager->send_threads = NULL;
        aos_info_log((LB, "join sender thread pool success"));
    }
    DeleteCond(manager->triger_cond);
    log_queue_destroy(manager->loggroup_queue);
    ClibsharequeueDestroy(manager->share_queue);
    if (manager->sender_data_queue != NULL)
    {
        aos_info_log((LB, "flush out sender queue begin"));
        while (log_queue_size(manager->sender_data_queue) > 0)
        {
            void* send_param = log_queue_trypop(manager->sender_data_queue);
            if (send_param != NULL)
            {
                log_producer_send_fun(send_param);
            }
        }
        log_queue_destroy(manager->sender_data_queue);
        aos_info_log((LB, "flush out sender queue success"));
    }
    ReleaseCriticalSection(manager->lock);
    if (manager->send_param_queue != NULL)
    {
        free(manager->send_param_queue);
        manager->send_param_queue = NULL;
    }
    free(manager);
    //TODO
    //g_producer_manager[producer_manager_num]
}
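
/*
 * The ADD_LOG_BEGIN / ADD_LOG_END macro pair brackets every add_* entry point below:
 * BEGIN takes the lock and lazily creates the current builder (dropping the log if the
 * loggroup_queue is full); END flushes the builder to the queue once logBytesPerPackage
 * or logCountPerPackage is reached (or flush != 0), signals the writer and returns.
 */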
#define LOG_PRODUCER_MANAGER_ADD_LOG_BEGIN CS_ENTER(producer_manager->lock); \
    if (producer_manager->builder == NULL) \
    { \
        if (log_queue_isfull(producer_manager->loggroup_queue)) \
        { \
            CS_LEAVE(producer_manager->lock); \
            return LOG_PRODUCER_DROP_ERROR; \
        } \
        producer_manager->builder = log_group_create(producer_manager->producer_config); \
        producer_manager->builder->private_value = producer_manager; \
    }
#define LOG_PRODUCER_MANAGER_ADD_LOG_END { int ret; \
    int status; \
    log_group_builder* builder = producer_manager->builder; \
    if (flush == 0 && producer_manager->builder->loggroup_size < producer_manager->producer_config->logBytesPerPackage && producer_manager->builder->grp->n_logs < producer_manager->producer_config->logCountPerPackage) \
    { \
        CS_LEAVE(producer_manager->lock); \
        return LOG_PRODUCER_OK; \
    } \
    ret = LOG_PRODUCER_OK; \
    producer_manager->builder = NULL; \
    status = log_queue_push(producer_manager->loggroup_queue, builder); \
    if (status != 0) \
    { \
        do \
        { \
            char* szLogBuf = AllocLogBuf(); \
            char szLevel[10]; \
            char szTime[32]; \
            szLogBuf[MFLOG_BUFSIZE] = 0; \
            MakeLogPrefix(LM_ERROR, szLevel, szTime); \
            _snprintf(szLogBuf, MFLOG_BUFSIZE, "try push loggroup to flusher failed, force drop this log group, error code : %d", status); \
            RvcLog_log(__FILE__, __LINE__, 1, LM_ERROR, szLevel, szTime, szLogBuf); \
        } while (0); \
        ret = LOG_PRODUCER_DROP_ERROR; \
        log_group_destroy(builder); \
    } \
    else \
    { \
        COND_SIGNAL(producer_manager->triger_cond); \
    } \
    CS_LEAVE(producer_manager->lock); \
    return ret; }
log_producer_result log_producer_manager_add_log(log_producer_manager* producer_manager, log_item* log, int flush, long record_time)
{
    LOG_PRODUCER_MANAGER_ADD_LOG_BEGIN;
    if (0 != add_log(producer_manager->builder, producer_manager->start_time, producer_manager->pack_index, log, record_time)) {
        aos_error_log((LB, "add log failed !"));
    }
    LOG_PRODUCER_MANAGER_ADD_LOG_END;
}

log_producer_result log_producer_manager_add_beidou_log(log_producer_manager* producer_manager, beidou_log_item* log, int flush)
{
    LOG_PRODUCER_MANAGER_ADD_LOG_BEGIN;
    if (0 != add_beidou_log(producer_manager->builder, log)) {
        aos_error_log((LB, "add log failed !"));
    }
    LOG_PRODUCER_MANAGER_ADD_LOG_END;
}

log_producer_result log_producer_manager_add_log_raw(log_producer_manager* producer_manager, build_item* log, int flush)
{
    LOG_PRODUCER_MANAGER_ADD_LOG_BEGIN;
    add_log_raw(producer_manager->builder, log);
    LOG_PRODUCER_MANAGER_ADD_LOG_END;
}
log_producer_result log_producer_manager_add_log_group(log_producer_manager* producer_manager, log_group_builder* builder, int flush)
{
    int ret;
    int status;
    CS_ENTER(producer_manager->lock);
    if (flush == 0 && builder->loggroup_size < producer_manager->producer_config->logBytesPerPackage && builder->grp->n_logs < producer_manager->producer_config->logCountPerPackage)
    {
        CS_LEAVE(producer_manager->lock);
        return LOG_PRODUCER_OK;
    }
    ret = LOG_PRODUCER_OK;
    builder->private_value = producer_manager;
    aos_debug_log((LB, "try push loggroup to flusher, size : %d, log count %d", (int)builder->loggroup_size, (int)builder->grp->n_logs));
    status = log_queue_push(producer_manager->loggroup_queue, builder);
    if (status != 0)
    {
        aos_error_log((LB, "try push loggroup to flusher failed, force drop this log group, error code : %d", status));
        ret = LOG_PRODUCER_DROP_ERROR;
    }
    else
    {
        COND_SIGNAL(producer_manager->triger_cond);
    }
    CS_LEAVE(producer_manager->lock);
    return ret;
}
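
#if 0
/*
 * Minimal usage sketch (illustrative only, compiled out): it assumes a log_producer_config
 * has already been created and populated elsewhere (endpoint, topics, package limits,
 * enableGuarder, sendThreadCount, ...) and that the caller already holds a log_item*.
 * Only the entry points used below are defined in this translation unit.
 */
static void log_producer_manager_usage_sketch(log_producer_config* config, log_item* item)
{
    log_producer_manager* manager = create_log_producer_manager(config);
    if (manager == NULL)
        return;
    /* flush = 0 lets the manager pack logs until logBytesPerPackage / logCountPerPackage
       is reached; record_time is the log timestamp supplied by the caller. */
    log_producer_manager_add_log(manager, item, 0, (long)time(NULL));
    /* drains the queues (bounded by the destroy*WaitTimeoutSec settings) and frees the manager */
    destroy_log_producer_manager(manager);
}
#endif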