/* $Id: q_malloc.c 5990 2009-08-20 06:58:17Z bogdan_iancu $ * * Copyright (C) 2001-2003 FhG Fokus * * This file is part of opensips, a free SIP server. * * opensips is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version * * opensips is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * History: * -------- * ????-??-?? created by andrei * 2003-04-14 more debugging added in DBG_QM_MALLOC mode (andrei) * 2003-06-29 added qm_realloc (andrei) * 2004-07-19 fragments book keeping code and support for 64 bits * memory blocks (64 bits machine & size>=2^32) (andrei) * GET_HASH s/ 4Gb mem., switched to long (andrei) * 2005-03-02 added qm_info() (andrei) * 2005-12-12 fixed realloc shrink real_used & used accounting; * fixed initial size (andrei) */ #include "precompile.h" #include "config.h" #include "q_malloc.h" #include "memutil.h" #include #define TAG TOOLKIT_TAG("q_malloc") struct qm_block_helper { char* base_addr; //parent share memory address pointer char* init_addr; //current share memory address pointer; ssize_t offset; //init_addr - base_addr } qm_h; #ifdef RELATIVE_ADDR #define QMH_MINUS_FRAG(f) ((struct qm_frag*)((char*)(f) - (unsigned long)(qm_h.init_addr))) #define QMH_ADD_FRAG(f) ((struct qm_frag*)((char*)(f) + (unsigned long)(qm_h.init_addr))) #else #define QMH_MINUS_FRAG(f) ((struct qm_frag*)((char*)(f))) #define QMH_ADD_FRAG(f) ((struct qm_frag*)((char*)(f))) #endif // RELATIVE_ADDR /*useful macros*/ #define FRAG_END(f) \ ((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \ (f)->size)) #define FRAG_NEXT(f) \ ((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \ sizeof(struct qm_frag_end))) #define FRAG_PREV(f) \ ( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \ ((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \ sizeof(struct qm_frag) ) ) #define PREV_FRAG_END(f) \ ((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end))) #define FRAG_OVERHEAD (sizeof(struct qm_frag)+sizeof(struct qm_frag_end)) #define ROUNDTO_MASK (~((unsigned long)ROUNDTO-1)) #define ROUNDUP(s) (((s)+(ROUNDTO-1))&ROUNDTO_MASK) //upper alignment to 16*n #define ROUNDDOWN(s) ((s)&ROUNDTO_MASK) //lower alignment to 16 /* #define ROUNDUP(s) (((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s)) #define ROUNDDOWN(s) (((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s)) */ /* finds the hash value for s, s=ROUNDTO multiple*/ #define GET_HASH(s) ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\ (unsigned long)(s)/ROUNDTO: \ QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \ QM_MALLOC_OPTIMIZE_FACTOR+1 ) #define UN_HASH(h) ( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\ (unsigned long)(h)*ROUNDTO: \ 1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\ QM_MALLOC_OPTIMIZE_FACTOR-1)\ ) /* mark/test used/unused frags */ #define FRAG_MARK_USED(f) #define FRAG_CLEAR_USED(f) #define FRAG_WAS_USED(f) (1) /* other frag related defines: * MEM_COALESCE_FRAGS * MEM_FRAG_AVOIDANCE */ #define MEM_FRAG_AVOIDANCE /* computes hash number for big buckets*/ __inline static unsigned long big_hash_idx(unsigned long s) { int idx; /* s is rounded => s = k*2^n (ROUNDTO=2^n) * index= i such that 2^i > s >= 2^(i-1) * * => index = number of the first non null bit in s*/ idx=sizeof(long)*8-1; for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--); return idx; } void qm_set_user_data(struct qm_block* qm, int idx, void* user_data) { qm->user_data[idx] = QMH_MINUS_FRAG(user_data); } void* qm_get_user_data(struct qm_block* qm, int idx) { return QMH_ADD_FRAG(qm->user_data[idx]); } static __inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag) { //WLog_DBG(TAG, "Enter %s", __FUNCTION__); struct qm_frag* f; struct qm_frag* prev; int hash; hash=GET_HASH(frag->size); for(f= QMH_ADD_FRAG(qm->free_hash[hash].head.u.nxt_free); f != &(qm->free_hash[hash].head); f = QMH_ADD_FRAG(f->u.nxt_free)) { if (frag->size <= f->size) break; } /*insert it here*/ prev = FRAG_END(f)->prev_free; prev = QMH_ADD_FRAG(prev); prev->u.nxt_free = QMH_MINUS_FRAG(frag); FRAG_END(frag)->prev_free= QMH_MINUS_FRAG(prev); frag->u.nxt_free = QMH_MINUS_FRAG(f); FRAG_END(f)->prev_free= QMH_MINUS_FRAG(frag); qm->free_hash[hash].no++; //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); } struct qm_block* qm_malloc_init(char* address, unsigned long size, int newcreate) { char* start; char* end; struct qm_block* qm; unsigned long init_overhead; int h; /* make address and size multiple of 8*/ start = (char*)ROUNDUP((unsigned long)address); WLog_DBG(TAG, "QM_OPTIMIZE=%lu, /ROUNDTO=%lu", QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO); WLog_DBG(TAG, "QM_HASH_SIZE=%lu, qm_block size=%lu", QM_HASH_SIZE, (long)sizeof(struct qm_block)); WLog_DBG(TAG, "params (%p, %lu), start=(%p, %lu)", address, size, start, (start-address)); if (size < (unsigned long)(start - address)) return 0; size -= (start - address); if (size < (MIN_FRAG_SIZE + FRAG_OVERHEAD)) return 0; size = ROUNDDOWN(size); init_overhead = ROUNDUP(sizeof(struct qm_block)) + sizeof(struct qm_frag) + sizeof(struct qm_frag_end); WLog_DBG(TAG, "size= %lu, init_overhead=%lu", size, init_overhead); if (size < init_overhead) { WLog_ERR(TAG, "not enough mem to create our control structures !!"); return 0; } qm = (struct qm_block*)start; memset(&qm_h, 0, sizeof(struct qm_block_helper)); if (newcreate) { memset(qm, 0, sizeof(struct qm_block)); qm->user_data[7] = qm; } qm_h.init_addr = (char*)qm; qm_h.base_addr = (char*)qm->user_data[7]; qm_h.offset = (ssize_t)(qm_h.init_addr) - (ssize_t)qm_h.base_addr; WLog_DBG(TAG, "%s: offset from parent: 0x%x", (!!newcreate) ? "parent" : "child", qm_h.offset); if (newcreate) { qm->size = size; qm->real_used = init_overhead; qm->max_real_used = qm->real_used; #ifdef RELATIVE_ADDR qm->first_frag = (struct qm_frag*)(0 + ROUNDUP(sizeof(struct qm_block))); qm->last_frag_end = (struct qm_frag_end*)(size - sizeof(struct qm_frag_end)); WLog_DBG(TAG, "relatively addr pointer: %p, %p", qm->first_frag, qm->last_frag_end); WLog_DBG(TAG, "absolutely addr pointer: %p, %p", QMH_ADD_FRAG(qm->first_frag), QMH_ADD_FRAG(qm->last_frag_end)); #else end = start + size; qm->first_frag = (struct qm_frag*)(start + ROUNDUP(sizeof(struct qm_block))); qm->last_frag_end = (struct qm_frag_end*)(end - sizeof(struct qm_frag_end)); #endif //RELATIVE_ADDR } else { WLog_DBG(TAG, "size: %d vs %d", qm->size, size); WLog_DBG(TAG, "real_used: %d vs %d", qm->real_used, init_overhead); } if (newcreate) { size -= init_overhead; /* init initial fragment*/ QMH_ADD_FRAG(qm->first_frag)->size = size; QMH_ADD_FRAG(qm->last_frag_end)->size = size; /* init free_hash* */ for (h = 0; h < QM_HASH_SIZE; h++) { qm->free_hash[h].head.u.nxt_free = QMH_MINUS_FRAG(&(qm->free_hash[h].head)); qm->free_hash[h].tail.prev_free = QMH_MINUS_FRAG(&(qm->free_hash[h].head)); qm->free_hash[h].head.size = 0; qm->free_hash[h].tail.size = 0; } /* link initial fragment into the free list*/ qm_insert_free(qm, QMH_ADD_FRAG(qm->first_frag)); } else { WLog_DBG(TAG, "first_frag size: %d vs %d", QMH_ADD_FRAG(qm->first_frag)->size, size); WLog_DBG(TAG, "last_frag_end size: %d vs %d", QMH_ADD_FRAG(qm->last_frag_end)->size, size); } WLog_DBG(TAG, "quit %s", __FUNCTION__); return qm; } static __inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag) { //WLog_DBG(TAG, "Enter %s", __FUNCTION__); struct qm_frag *prev; struct qm_frag *next; prev = FRAG_END(frag)->prev_free; prev = QMH_ADD_FRAG(prev); next=frag->u.nxt_free; prev->u.nxt_free=next; next = QMH_ADD_FRAG(next); FRAG_END(next)->prev_free= QMH_MINUS_FRAG(prev); //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); } /** return the index of free hash bucket*/ static __inline struct qm_frag* qm_find_free(struct qm_block* qm, unsigned long size, int* h) { //WLog_DBG(TAG, "Enter %s", __FUNCTION__); int hash; struct qm_frag* f; for (hash=GET_HASH(size); hashfree_hash[hash].head.u.nxt_free); f != &(qm->free_hash[hash].head); f= QMH_ADD_FRAG(f->u.nxt_free)) { if (f->size>=size) { *h=hash; //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); return f; } } /*try in a bigger bucket*/ } /* not found */ //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); return 0; } /* returns 0 on success, -1 on error; * new_size < size & rounded-up already!*/ static __inline int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size) { //WLog_DBG(TAG, "Enter %s", __FUNCTION__); unsigned long rest; struct qm_frag* n; struct qm_frag_end* end; rest=f->size-new_size; #ifdef MEM_FRAG_AVOIDANCE if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))|| (rest >= (FRAG_OVERHEAD+new_size))) {/* the residue fragm. is big enough*/ #else if (rest > (FRAG_OVERHEAD+MIN_FRAG_SIZE)) { #endif f->size=new_size; /*split the fragment*/ end=FRAG_END(f); end->size=new_size; n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end)); n->size=rest-FRAG_OVERHEAD; FRAG_END(n)->size=n->size; FRAG_CLEAR_USED(n); /* never used */ qm->real_used+=FRAG_OVERHEAD; /* reinsert n in free list*/ qm_insert_free(qm, n); //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); return 0; } else { /* we cannot split this fragment any more */ //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); return -1; } } void* qm_malloc(struct qm_block* qm, unsigned long size) { //WLog_DBG(TAG, "Enter %s", __FUNCTION__); struct qm_frag* f; int hash; /*size must be a multiple of 8*/ size = ROUNDUP(size); if (size > (qm->size - qm->real_used)) { //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); return 0; } /*search for a suitable free frag*/ if ((f=qm_find_free(qm, size, &hash)) !=0) { /* we found it!*/ /*detach it from the free list*/ qm_detach_free(qm, f); /*mark it as "busy"*/ f->u.is_free=0; qm->free_hash[hash].no--; /* we ignore split return */ split_frag(qm, f, size); qm->real_used+=f->size; qm->used+=f->size; if (qm->max_real_usedreal_used) qm->max_real_used = qm->real_used; //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); return (char*)f + sizeof(struct qm_frag); } //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); return 0; } void qm_free(struct qm_block* qm, void* p) { //WLog_DBG(TAG, "Enter %s", __FUNCTION__); struct qm_frag* f; struct qm_frag* prev; struct qm_frag* next; unsigned long size; if (p==0) { return; } prev=next=0; f = (struct qm_frag*) ((char*) p - sizeof(struct qm_frag)); size=f->size; qm->used-=size; qm->real_used-=size; /* join packets if possible*/ next=FRAG_NEXT(f); if (((char*)next < (char*)QMH_ADD_FRAG(qm->last_frag_end)) &&( next->u.is_free)){ /* join */ qm_detach_free(qm, next); size+=next->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */ } if (f > QMH_ADD_FRAG(qm->first_frag)){ prev=FRAG_PREV(f); if (prev->u.is_free){ /*join*/ qm_detach_free(qm, prev); size+=prev->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */ f=prev; } } f->size=size; FRAG_END(f)->size=f->size; qm_insert_free(qm, f); //WLog_DBG(TAG, "Leave %s %d", __FUNCTION__, __LINE__); } void qm_lock(struct qm_block* qm) { DWORD i = 0; DWORD spin = 256; while (InterlockedCompareExchange((LONG*)&qm->lock, 1, 0) == 1) { for (; i < spin; ++i) if (qm->lock == 0) break; if (i >= spin) Sleep(1); /* yield cpu */ } } void qm_unlock(struct qm_block* qm) { InterlockedExchange((LONG*)&qm->lock, 0); }