/* q_malloc.c */
  1. /* $Id: q_malloc.c 5990 2009-08-20 06:58:17Z bogdan_iancu $
  2. *
  3. * Copyright (C) 2001-2003 FhG Fokus
  4. *
  5. * This file is part of opensips, a free SIP server.
  6. *
  7. * opensips is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version
  11. *
  12. * opensips is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. */
  21. /*
  22. * History:
  23. * --------
  24. * ????-??-?? created by andrei
  25. * 2003-04-14 more debugging added in DBG_QM_MALLOC mode (andrei)
  26. * 2003-06-29 added qm_realloc (andrei)
  27. * 2004-07-19 fragments book keeping code and support for 64 bits
  28. * memory blocks (64 bits machine & size>=2^32) (andrei)
  29. * GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
  30. * 2004-11-10 support for > 4Gb mem., switched to long (andrei)
  31. * 2005-03-02 added qm_info() (andrei)
  32. * 2005-12-12 fixed realloc shrink real_used & used accounting;
  33. * fixed initial size (andrei)
  34. */
  35. #include "precompile.h"
  36. #include "config.h"
  37. #include "q_malloc.h"
  38. #include "memutil.h"
  39. #include <winpr/wtypes.h>
  40. #define TAG TOOLKIT_TAG("q_malloc")
/* Book-keeping for mapping one shared-memory block in several processes.
 * When RELATIVE_ADDR is defined, every fragment pointer stored inside the
 * block is kept relative to init_addr, so the block stays consistent even
 * if each process maps it at a different virtual address. */
struct qm_block_helper
{
	char* base_addr; /* parent's (creator's) share memory address pointer */
	char* init_addr; /* current process's share memory address pointer */
	ssize_t offset;  /* init_addr - base_addr */
} qm_h;
#ifdef RELATIVE_ADDR
/* Convert an absolute fragment pointer into its block-relative form (for
 * storing inside the shared block) and back again on retrieval. */
#define QMH_MINUS_FRAG(f) ((struct qm_frag*)((char*)(f) - (unsigned long)(qm_h.init_addr)))
#define QMH_ADD_FRAG(f) ((struct qm_frag*)((char*)(f) + (unsigned long)(qm_h.init_addr)))
#else
/* Absolute addressing: both conversions are identity casts. */
#define QMH_MINUS_FRAG(f) ((struct qm_frag*)((char*)(f)))
#define QMH_ADD_FRAG(f) ((struct qm_frag*)((char*)(f)))
#endif // RELATIVE_ADDR
/*useful macros*/
/* end-marker that sits right after a fragment's payload */
#define FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
	(f)->size))
/* header of the physically next fragment */
#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	sizeof(struct qm_frag_end)))
/* header of the physically previous fragment (walks back through its end marker) */
#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
	sizeof(struct qm_frag) ) )
#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))
/* per-fragment book-keeping cost (header + end marker) */
#define FRAG_OVERHEAD (sizeof(struct qm_frag)+sizeof(struct qm_frag_end))
#define ROUNDTO_MASK (~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s) (((s)+(ROUNDTO-1))&ROUNDTO_MASK) /* upper alignment to ROUNDTO*n */
#define ROUNDDOWN(s) ((s)&ROUNDTO_MASK) /* lower alignment to ROUNDTO */
/*
#define ROUNDUP(s) (((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s) (((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
*/
/* finds the hash value for s, s=ROUNDTO multiple*/
/* small sizes map linearly (one bucket per ROUNDTO step), big sizes map
 * logarithmically via big_hash_idx() */
#define GET_HASH(s) ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
	(unsigned long)(s)/ROUNDTO: \
	QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
	QM_MALLOC_OPTIMIZE_FACTOR+1 )
/* inverse of GET_HASH: smallest size that hashes into bucket h */
#define UN_HASH(h) ( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
	(unsigned long)(h)*ROUNDTO: \
	1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
	QM_MALLOC_OPTIMIZE_FACTOR-1)\
	)
/* mark/test used/unused frags */
/* no-ops in this (non-debug) build; FRAG_WAS_USED always reports "used" */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f) (1)
/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */
#define MEM_FRAG_AVOIDANCE
  94. /* computes hash number for big buckets*/
  95. __inline static unsigned long big_hash_idx(unsigned long s)
  96. {
  97. int idx;
  98. /* s is rounded => s = k*2^n (ROUNDTO=2^n)
  99. * index= i such that 2^i > s >= 2^(i-1)
  100. *
  101. * => index = number of the first non null bit in s*/
  102. idx=sizeof(long)*8-1;
  103. for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
  104. return idx;
  105. }
/* Store user_data in slot idx of the block's user_data array, converted to
 * block-relative form so the value stays meaningful across processes
 * (identity conversion unless RELATIVE_ADDR is defined).
 * NOTE(review): idx is not range-checked here - callers must stay within
 * the user_data array bounds. */
void qm_set_user_data(struct qm_block* qm, int idx, void* user_data)
{
	qm->user_data[idx] = QMH_MINUS_FRAG(user_data);
}
/* Fetch slot idx of the block's user_data array, converting the stored
 * block-relative value back to an absolute pointer for this process
 * (identity conversion unless RELATIVE_ADDR is defined). */
void* qm_get_user_data(struct qm_block* qm, int idx)
{
	return QMH_ADD_FRAG(qm->user_data[idx]);
}
/* Insert frag into the free list of its hash bucket, keeping the list
 * sorted by ascending size. All links stored inside the block are kept in
 * block-relative form (QMH_MINUS_FRAG) and converted back to absolute
 * pointers (QMH_ADD_FRAG) while traversing. */
static __inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
{
	//WLog_DBG(TAG, "Enter %s", __FUNCTION__);
	struct qm_frag* f;
	struct qm_frag* prev;
	int hash;
	hash=GET_HASH(frag->size);
	/* find the first element at least as large as frag; if none, f ends up
	 * as the bucket head sentinel and frag is appended at the tail */
	for(f= QMH_ADD_FRAG(qm->free_hash[hash].head.u.nxt_free);
		f != &(qm->free_hash[hash].head); f = QMH_ADD_FRAG(f->u.nxt_free)) {
		if (frag->size <= f->size) break;
	}
	/*insert it here*/
	prev = FRAG_END(f)->prev_free;
	prev = QMH_ADD_FRAG(prev);
	prev->u.nxt_free = QMH_MINUS_FRAG(frag);
	FRAG_END(frag)->prev_free= QMH_MINUS_FRAG(prev);
	frag->u.nxt_free = QMH_MINUS_FRAG(f);
	FRAG_END(f)->prev_free= QMH_MINUS_FRAG(frag);
	qm->free_hash[hash].no++;
	//WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
}
  135. struct qm_block* qm_malloc_init(char* address, unsigned long size, int newcreate)
  136. {
  137. char* start;
  138. char* end;
  139. struct qm_block* qm;
  140. unsigned long init_overhead;
  141. int h;
  142. /* make address and size multiple of 8*/
  143. start = (char*)ROUNDUP((unsigned long)address);
  144. WLog_DBG(TAG, "QM_OPTIMIZE=%lu, /ROUNDTO=%lu", QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
  145. WLog_DBG(TAG, "QM_HASH_SIZE=%lu, qm_block size=%lu", QM_HASH_SIZE, (long)sizeof(struct qm_block));
  146. WLog_DBG(TAG, "params (%p, %lu), start=(%p, %lu)", address, size, start, (start-address));
  147. if (size < (unsigned long)(start - address)) return 0;
  148. size -= (start - address);
  149. if (size < (MIN_FRAG_SIZE + FRAG_OVERHEAD)) return 0;
  150. size = ROUNDDOWN(size);
  151. init_overhead = ROUNDUP(sizeof(struct qm_block)) + sizeof(struct qm_frag) + sizeof(struct qm_frag_end);
  152. WLog_DBG(TAG, "size= %lu, init_overhead=%lu", size, init_overhead);
  153. if (size < init_overhead) {
  154. WLog_ERR(TAG, "not enough mem to create our control structures !!");
  155. return 0;
  156. }
  157. qm = (struct qm_block*)start;
  158. memset(&qm_h, 0, sizeof(struct qm_block_helper));
  159. if (newcreate) {
  160. memset(qm, 0, sizeof(struct qm_block));
  161. qm->user_data[7] = qm;
  162. }
  163. qm_h.init_addr = (char*)qm;
  164. qm_h.base_addr = (char*)qm->user_data[7];
  165. qm_h.offset = (ssize_t)(qm_h.init_addr) - (ssize_t)qm_h.base_addr;
  166. WLog_DBG(TAG, "%s: offset from parent: 0x%x", (!!newcreate) ? "parent" : "child", qm_h.offset);
  167. if (newcreate) {
  168. qm->size = size;
  169. qm->real_used = init_overhead;
  170. qm->max_real_used = qm->real_used;
  171. #ifdef RELATIVE_ADDR
  172. qm->first_frag = (struct qm_frag*)(0 + ROUNDUP(sizeof(struct qm_block)));
  173. qm->last_frag_end = (struct qm_frag_end*)(size - sizeof(struct qm_frag_end));
  174. WLog_DBG(TAG, "relatively addr pointer: %p, %p", qm->first_frag, qm->last_frag_end);
  175. WLog_DBG(TAG, "absolutely addr pointer: %p, %p", QMH_ADD_FRAG(qm->first_frag), QMH_ADD_FRAG(qm->last_frag_end));
  176. #else
  177. end = start + size;
  178. qm->first_frag = (struct qm_frag*)(start + ROUNDUP(sizeof(struct qm_block)));
  179. qm->last_frag_end = (struct qm_frag_end*)(end - sizeof(struct qm_frag_end));
  180. #endif //RELATIVE_ADDR
  181. } else {
  182. WLog_DBG(TAG, "size: %d vs %d", qm->size, size);
  183. WLog_DBG(TAG, "real_used: %d vs %d", qm->real_used, init_overhead);
  184. }
  185. if (newcreate) {
  186. size -= init_overhead;
  187. /* init initial fragment*/
  188. QMH_ADD_FRAG(qm->first_frag)->size = size;
  189. QMH_ADD_FRAG(qm->last_frag_end)->size = size;
  190. /* init free_hash* */
  191. for (h = 0; h < QM_HASH_SIZE; h++) {
  192. qm->free_hash[h].head.u.nxt_free = QMH_MINUS_FRAG(&(qm->free_hash[h].head));
  193. qm->free_hash[h].tail.prev_free = QMH_MINUS_FRAG(&(qm->free_hash[h].head));
  194. qm->free_hash[h].head.size = 0;
  195. qm->free_hash[h].tail.size = 0;
  196. }
  197. /* link initial fragment into the free list*/
  198. qm_insert_free(qm, QMH_ADD_FRAG(qm->first_frag));
  199. } else {
  200. WLog_DBG(TAG, "first_frag size: %d vs %d", QMH_ADD_FRAG(qm->first_frag)->size, size);
  201. WLog_DBG(TAG, "last_frag_end size: %d vs %d", QMH_ADD_FRAG(qm->last_frag_end)->size, size);
  202. }
  203. WLog_DBG(TAG, "quit %s", __FUNCTION__);
  204. return qm;
  205. }
/* Unlink frag from its free list. Stored links are block-relative: they
 * are converted to absolute pointers (QMH_ADD_FRAG) only for dereferencing,
 * and written back in relative form. Note that `next` is copied from
 * frag->u.nxt_free and stored into prev->u.nxt_free while still in relative
 * form - the order of conversions below is deliberate. */
static __inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
{
	//WLog_DBG(TAG, "Enter %s", __FUNCTION__);
	struct qm_frag *prev;
	struct qm_frag *next;
	prev = FRAG_END(frag)->prev_free;
	prev = QMH_ADD_FRAG(prev);
	next=frag->u.nxt_free;
	prev->u.nxt_free=next; /* still relative - copied as stored */
	next = QMH_ADD_FRAG(next);
	FRAG_END(next)->prev_free= QMH_MINUS_FRAG(prev);
	//WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
}
  219. /** return the index of free hash bucket*/
  220. static __inline struct qm_frag* qm_find_free(struct qm_block* qm, unsigned long size, int* h)
  221. {
  222. //WLog_DBG(TAG, "Enter %s", __FUNCTION__);
  223. int hash;
  224. struct qm_frag* f;
  225. for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++) {
  226. for (f= QMH_ADD_FRAG(qm->free_hash[hash].head.u.nxt_free);
  227. f != &(qm->free_hash[hash].head);
  228. f= QMH_ADD_FRAG(f->u.nxt_free)) {
  229. if (f->size>=size) {
  230. *h=hash;
  231. //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
  232. return f;
  233. }
  234. }
  235. /*try in a bigger bucket*/
  236. }
  237. /* not found */
  238. //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
  239. return 0;
  240. }
/* returns 0 on success, -1 on error;
 * new_size < size & rounded-up already!*/
/* Split fragment f so its payload becomes new_size bytes, turning the
 * remainder into a new free fragment that is reinserted into the free
 * lists. With MEM_FRAG_AVOIDANCE the split only happens when the residue
 * is "worth it" (bigger than QM_MALLOC_OPTIMIZE, or at least as big as the
 * requested size plus overhead); otherwise the caller keeps the whole
 * fragment and -1 is returned. */
static __inline int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
{
	//WLog_DBG(TAG, "Enter %s", __FUNCTION__);
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;
	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest >= (FRAG_OVERHEAD+new_size))) {/* the residue fragm. is big enough*/
#else
	if (rest > (FRAG_OVERHEAD+MIN_FRAG_SIZE)) {
#endif
		f->size=new_size;
		/*split the fragment*/
		/* FRAG_END uses the just-shrunk f->size, so `end` marks the new,
		 * smaller payload boundary */
		end=FRAG_END(f);
		end->size=new_size;
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		//WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
		return 0;
	} else {
		/* we cannot split this fragment any more */
		//WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
		return -1;
	}
}
  275. void* qm_malloc(struct qm_block* qm, unsigned long size)
  276. {
  277. //WLog_DBG(TAG, "Enter %s", __FUNCTION__);
  278. struct qm_frag* f;
  279. int hash;
  280. /*size must be a multiple of 8*/
  281. size = ROUNDUP(size);
  282. if (size > (qm->size - qm->real_used)) {
  283. //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
  284. return 0;
  285. }
  286. /*search for a suitable free frag*/
  287. if ((f=qm_find_free(qm, size, &hash)) !=0) {
  288. /* we found it!*/
  289. /*detach it from the free list*/
  290. qm_detach_free(qm, f);
  291. /*mark it as "busy"*/
  292. f->u.is_free=0;
  293. qm->free_hash[hash].no--;
  294. /* we ignore split return */
  295. split_frag(qm, f, size);
  296. qm->real_used+=f->size;
  297. qm->used+=f->size;
  298. if (qm->max_real_used<qm->real_used)
  299. qm->max_real_used = qm->real_used;
  300. //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
  301. return (char*)f + sizeof(struct qm_frag);
  302. }
  303. //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
  304. return 0;
  305. }
/* Free a pointer previously returned by qm_malloc. Merges the fragment
 * with the physically-next and physically-previous fragments when they are
 * free, then reinserts the (possibly merged) fragment into the free lists.
 * p==0 is a no-op. */
void qm_free(struct qm_block* qm, void* p)
{
	//WLog_DBG(TAG, "Enter %s", __FUNCTION__);
	struct qm_frag* f;
	struct qm_frag* prev;
	struct qm_frag* next;
	unsigned long size;
	if (p==0) {
		return;
	}
	prev=next=0;
	/* step back from the payload to the fragment header */
	f = (struct qm_frag*) ((char*) p - sizeof(struct qm_frag));
	size=f->size;
	qm->used-=size;
	qm->real_used-=size;
	/* join packets if possible*/
	next=FRAG_NEXT(f);
	if (((char*)next < (char*)QMH_ADD_FRAG(qm->last_frag_end)) &&( next->u.is_free)){
		/* join */
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->real_used-=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
	}
	if (f > QMH_ADD_FRAG(qm->first_frag)){
		prev=FRAG_PREV(f);
		if (prev->u.is_free){
			/*join*/
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
			f=prev;
		}
	}
	/* publish the merged size in both the header and the end marker */
	f->size=size;
	FRAG_END(f)->size=f->size;
	qm_insert_free(qm, f);
	//WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__);
}
  346. void qm_lock(struct qm_block* qm)
  347. {
  348. DWORD i = 0;
  349. DWORD spin = 256;
  350. while (InterlockedCompareExchange((LONG*)&qm->lock, 1, 0) == 1) {
  351. for (; i < spin; ++i)
  352. if (qm->lock == 0)
  353. break;
  354. if (i >= spin)
  355. Sleep(1); /* yield cpu */
  356. }
  357. }
/* Release the block's spin lock by atomically storing 0 into the lock
 * word (InterlockedExchange also acts as a full memory barrier). */
void qm_unlock(struct qm_block* qm)
{
	InterlockedExchange((LONG*)&qm->lock, 0);
}