/*
   This file is part of MutekP.

   MutekP is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   MutekP is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with MutekP; if not, write to the Free Software Foundation,
   Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

   UPMC / LIP6 / SOC (c) 2009
   Copyright Ghassan Almaless <ghassan.almaless@gmail.com>
*/

#include <sys/types.h>
#include <sys/syscall.h>
#include <cpu-syscall.h>
#include <errno.h>
#include <pthread.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>

#ifndef CONFIG_LIBC_MALLOC_DEBUG
#define CONFIG_LIBC_MALLOC_DEBUG 0
#endif

#undef dmsg_r
#define dmsg_r(...)

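/*
 * Global heap manager state. The heap is one contiguous region starting at
 * the end of .bss, grown on demand with the SYS_SBRK syscall and carved into
 * blocks. 'limit' is the current end of the region, 'next' is the hint where
 * the next first-fit scan starts, and 'last' tracks the final block so the
 * region can be extended in place. The hot fields are annotated __CACHELINE
 * to keep them on separate cache lines and limit false sharing between CPUs.
 */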
typedef struct heap_manager_s
{
  pthread_spinlock_t lock;

  volatile uint_t limit   __CACHELINE;
  volatile uint_t current __CACHELINE;
  volatile uint_t next    __CACHELINE;
  volatile uint_t last    __CACHELINE;

  uint_t start;
  uint_t sbrk_size;
}heap_manager_t;

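/*
 * Per-block header stored at the beginning of every block. 'busy' flags
 * allocated blocks and 'size' is the full block length in bytes, header
 * included. valloc() writes an extra shadow header just below the aligned
 * user pointer; there, 'size' holds the requested size and 'ptr' points
 * back to the real block header (it is NULL for ordinary blocks).
 */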
struct block_info_s
{
  uint_t busy:1;
  uint_t size:31;
  struct block_info_s *ptr;
} __attribute__ ((packed));

typedef struct block_info_s block_info_t;

static heap_manager_t heap_mgr;
extern uint_t __bss_end;
static int cacheline_size;
static int page_size;

/* Round 'val' up to the next multiple of 'size'; 'size' must be a power of two. */
#define ARROUND_UP(val, size) (((val) & ((size) - 1)) ? (((val) & ~((size) - 1)) + (size)) : (val))

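/*
 * One-time heap initialization: asks the kernel for the initial heap size,
 * installs a single free block spanning the whole region and caches the
 * page and cache-line sizes later used to align and split blocks.
 */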
void __heap_manager_init(void)
{
  block_info_t *info;
  uint_t initial_size;

  initial_size = (uint_t) cpu_syscall(NULL, NULL, NULL, NULL, SYS_SBRK);

  if(initial_size < 4096)
    initial_size = 0x100000;   /* subject to segfault */

  pthread_spin_init(&heap_mgr.lock, 0);
  heap_mgr.start = (uint_t)&__bss_end;
  heap_mgr.limit = heap_mgr.start + initial_size;

  info = (block_info_t*)heap_mgr.start;
  info->busy = 0;
  info->size = heap_mgr.limit - heap_mgr.start;
  info->ptr  = NULL;

#if CONFIG_LIBC_MALLOC_DEBUG
  printf("%s: start %x, info %x, size %d\n",
         __FUNCTION__,
         (unsigned)heap_mgr.start,
         (unsigned)info,
         ((block_info_t*)heap_mgr.start)->size);
#endif

  heap_mgr.current = heap_mgr.start;
  heap_mgr.next    = heap_mgr.start;
  heap_mgr.last    = heap_mgr.start;

  cacheline_size = sysconf(_SC_CACHE_LINE_SIZE);
  cacheline_size = (cacheline_size <= 0) ? 64 : cacheline_size;
  page_size      = sysconf(_SC_PAGE_SIZE);
  page_size      = (page_size <= 0) ? 4096 : page_size;

  heap_mgr.sbrk_size = 2 * (initial_size / page_size);
}

void* do_malloc(size_t size, int round);

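/*
 * malloc(): delegates the actual allocation to do_malloc(). When the heap is
 * exhausted it grows the region with SYS_SBRK (by at least sbrk_size pages)
 * and retries, yielding the CPU after MALLOC_THRESHOLD unsuccessful rounds
 * and giving up after MALLOC_LIMIT rounds or when the kernel reports ENOMEM.
 */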
void* malloc(size_t size)
{
  block_info_t *info;
  void *ptr;
  int err;
  int tries_nr;
  uint_t blksz;

  int pid = getpid();

#if 1
  dmsg_r(stderr, "libc: %s: pid %d, tid %d, 0x%x started\n",
         __FUNCTION__, pid, (int)pthread_self(), (unsigned)&heap_mgr.lock);
#endif

#undef  MALLOC_THRESHOLD
#define MALLOC_THRESHOLD 5

#undef  MALLOC_LIMIT
#define MALLOC_LIMIT 20

  tries_nr = 0;
  ptr      = NULL;
  err      = 0;

  while((ptr == NULL) && (tries_nr < MALLOC_LIMIT) && (err != ENOMEM))
  {
    ptr = do_malloc(size, tries_nr);

    if(ptr == NULL)
    {
      blksz = size / page_size;
      blksz = (blksz < heap_mgr.sbrk_size) ? heap_mgr.sbrk_size : (blksz + heap_mgr.sbrk_size);

      err = (int) cpu_syscall((void*)heap_mgr.limit,
                              (void*)blksz,
                              NULL, NULL, SYS_SBRK);
      if(err != 0)
        continue;

      blksz = blksz * page_size;

      pthread_spin_lock(&heap_mgr.lock);

      heap_mgr.limit += blksz;

      info = (block_info_t*)heap_mgr.last;

      if(info->busy)
      {
        info = (block_info_t*)(heap_mgr.last + info->size);
        info->busy = 0;
        info->size = blksz;
        info->ptr  = NULL;
        heap_mgr.last = (uint_t)info;
      }
      else
        info->size += blksz;

      pthread_spin_unlock(&heap_mgr.lock);

      if(++tries_nr > MALLOC_THRESHOLD)
        pthread_yield();
    }
  }

#if 1
  dmsg_r(stderr, "libc: %s: pid %d, tid %d, 0x%x return 0x%x\n",
         __FUNCTION__, pid, (unsigned)pthread_self(), (unsigned)&heap_mgr.lock, (unsigned)ptr);
#endif

  return ptr;
}

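/*
 * do_malloc(): first-fit scan over the block list, performed under the heap
 * spinlock. On the first round (round == 0) it bails out early when the
 * 'next' hint is too close to the heap limit; otherwise it scans from 'next'
 * (wrapping to the heap start when needed), splits the block it finds when
 * the remainder is at least one cache line, and returns NULL without growing
 * the heap when no fitting block exists.
 */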
void* do_malloc(size_t size, int round)
{
  block_info_t *current;
  block_info_t *next;
  register size_t effective_size;

  effective_size = size + sizeof(*current);
  effective_size = ARROUND_UP(effective_size, cacheline_size);

  if((round == 0) && (heap_mgr.next > (heap_mgr.limit - effective_size)))
    return NULL;

  pthread_spin_lock(&heap_mgr.lock);

  if(heap_mgr.next > (heap_mgr.limit - effective_size))
    current = (block_info_t*)heap_mgr.start;
  else
    current = (block_info_t*)heap_mgr.next;

#if CONFIG_LIBC_MALLOC_DEBUG
  int cpu;
  pthread_attr_getcpuid_np(&cpu);

  fprintf(stderr,
          "%s: cpu %d Started [sz %d, esz %d, pg %d, cl %d, start %x, next %x, "
          "last %x, current %x, limit %x, busy %d, size %d]\n",
          __FUNCTION__,
          cpu,
          size,
          effective_size,
          page_size,
          cacheline_size,
          (unsigned)heap_mgr.start,
          (unsigned)heap_mgr.next,
          (unsigned)heap_mgr.last,
          (unsigned)current,
          (unsigned)heap_mgr.limit,
          current->busy,
          current->size);
#endif

  while(current->busy || (current->size < effective_size))
  {
    if((current->busy) && (current->size == 0))
      fprintf(stderr, "Corrupted memory block descriptor: @0x%x\n", (unsigned int)current);

    current = (block_info_t*) ((char*)current + current->size);

    if((uint_t)current >= heap_mgr.limit)
    {
      pthread_spin_unlock(&heap_mgr.lock);

#if CONFIG_LIBC_MALLOC_DEBUG
      int cpu;
      pthread_attr_getcpuid_np(&cpu);

      fprintf(stderr, "[%s] cpu %d, pid %d, tid %d, limit 0x%x, ended with ENOMEM\n",
              __FUNCTION__,
              cpu,
              (unsigned)getpid(),
              (unsigned)pthread_self(),
              (unsigned)heap_mgr.limit);
#endif
      return NULL;
    }

    dmsg_r(stderr, "%s: current 0x%x, current->busy %d, current->size %d, limit %d [%d]\n",
           __FUNCTION__,
           (unsigned)current,
           (unsigned)current->busy,
           (unsigned)current->size,
           (unsigned)(heap_mgr.limit - sizeof(*current)),
           (unsigned)heap_mgr.limit);
  }

  if((current->size - effective_size) >= (uint_t)cacheline_size)
  {
    next = (block_info_t*) ((uint_t)current + effective_size);
    next->size = current->size - effective_size;
    next->busy = 0;
    current->size = effective_size;
    current->busy = 1;
  }
  else
  {
    current->busy = 1;
    next = (block_info_t*)((uint_t)current + current->size);
  }

  heap_mgr.next = (uint_t)next;
  current->ptr  = NULL;

#if CONFIG_LIBC_MALLOC_DEBUG
  fprintf(stderr, "%s: cpu %d End [ 0x%x ]\n", __FUNCTION__, cpu, ((unsigned int)current) + sizeof(*current));
#endif

  if(((uint_t)next + next->size) >= heap_mgr.limit)
    heap_mgr.last = (uint_t)next;

  pthread_spin_unlock(&heap_mgr.lock);
  return (char*)current + sizeof(*current);
}

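/*
 * free(): recovers the block header (following the valloc() back-pointer if
 * present), clears its busy flag and eagerly coalesces it with any free
 * blocks that follow, then moves the 'next' allocation hint backwards when
 * the freed block lies before it.
 */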
void free(void *ptr)
{
  block_info_t *current;
  block_info_t *next;
  size_t limit;

  if(ptr == NULL)
    return;

  current = (block_info_t*) ((char*)ptr - sizeof(*current));
  current = (current->ptr != NULL) ? current->ptr : current;

  pthread_spin_lock(&heap_mgr.lock);

  limit = heap_mgr.limit;
  current->busy = 0;

  while (1)
  {
    next = (block_info_t*) ((char*) current + current->size);

    if (((uint_t)next >= limit) || (next->busy == 1))
      break;

    current->size += next->size;
  }

  if((uint_t)current < heap_mgr.next)
    heap_mgr.next = (uint_t) current;

  pthread_spin_unlock(&heap_mgr.lock);
}

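/*
 * realloc(): returns the same pointer when the existing block is already big
 * enough. Otherwise it allocates a new block (page-aligned through valloc()
 * when the original came from valloc()), copies the old contents and frees
 * the underlying block.
 */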
void* realloc(void *ptr, size_t size)
{
  block_info_t *info;
  uint_t old_size;
  void* new_zone;
  void* block;

  if(!ptr)
    return malloc(size);

  if(!size)
  {
    free(ptr);
    return NULL;
  }

  /* Reuse the current block when it is already large enough */
  info = (block_info_t*)((char*)ptr - sizeof(*info));

  /* For an ordinary block, info->size counts the header as well; for a
     valloc()'d block the shadow header holds the user-requested size. */
  old_size = (info->ptr == NULL) ? info->size - sizeof(*info) : info->size;

  if(size <= old_size)
    return ptr;

  /* Allocate a new block */
  if(info->ptr == NULL)
  {
    new_zone = malloc(size);
  }
  else
  {
    info = info->ptr;
    new_zone = valloc(size);
  }

  if(new_zone == NULL)
    return NULL;

  memcpy(new_zone, ptr, old_size);

  block = (char*)info + sizeof(*info);

  free(block);
  return new_zone;
}

void* calloc(size_t nmemb, size_t size)
{
  void *ptr;

  /* Guard against overflow in nmemb * size */
  if((nmemb != 0) && (size > ((size_t)-1) / nmemb))
    return NULL;

  ptr = malloc(nmemb * size);

  if(ptr == NULL)
    return NULL;

  memset(ptr, 0, (nmemb * size));

  return ptr;
}

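/*
 * valloc(): page-aligned allocation. Over-allocates one extra page through
 * malloc(), rounds the returned address up to the next page boundary and
 * writes a shadow header just below that address whose 'ptr' field points
 * back to the real block header, so free() and realloc() can find it.
 */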
void* valloc(size_t size)
{
  void *ptr;
  uintptr_t block;
  block_info_t *info;
  size_t asked_size;

  asked_size = size;
  size  = ARROUND_UP(size, page_size);
  block = (uintptr_t) malloc(size + page_size);

  if(block == 0) return NULL;

  ptr  = (void*)((block + page_size) - (block % page_size));
  info = (block_info_t*) ((char*)ptr - sizeof(*info));

  info->busy = 1;
  info->size = asked_size;
  info->ptr  = (block_info_t*)(block - sizeof(*info));

  return ptr;
}