1 | /* A replacement malloc with: |
---|
2 | - Much reduced code size; |
---|
3 | - Smaller RAM footprint; |
---|
4 | - The ability to handle downward-growing heaps; |
---|
5 | but |
---|
6 | - Slower; |
---|
7 | - Probably higher memory fragmentation; |
---|
8 | - Doesn't support threads (but, if it did support threads, |
---|
9 | it wouldn't need a global lock, only a compare-and-swap instruction); |
---|
10 | - Assumes the maximum alignment required is the alignment of a pointer; |
---|
11 | - Assumes that memory is already there and doesn't need to be allocated. |
---|
12 | |
---|
13 | * Synopsis of public routines |
---|
14 | |
---|
15 | malloc(size_t n); |
---|
16 | Return a pointer to a newly allocated chunk of at least n bytes, or null |
---|
17 | if no space is available. |
---|
18 | free(void* p); |
---|
19 | Release the chunk of memory pointed to by p, or no effect if p is null. |
---|
20 | realloc(void* p, size_t n); |
---|
21 | Return a pointer to a chunk of size n that contains the same data |
---|
22 | as does chunk p up to the minimum of (n, p's size) bytes, or null |
---|
23 | if no space is available. The returned pointer may or may not be |
---|
24 | the same as p. If p is null, equivalent to malloc. Unless the |
---|
25 | #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a |
---|
26 | size argument of zero (re)allocates a minimum-sized chunk. |
---|
27 | memalign(size_t alignment, size_t n); |
---|
28 | Return a pointer to a newly allocated chunk of n bytes, aligned |
---|
29 | in accord with the alignment argument, which must be a power of |
---|
30 | two. Will fail if 'alignment' is too large. |
---|
31 | calloc(size_t unit, size_t quantity); |
---|
32 | Returns a pointer to quantity * unit bytes, with all locations |
---|
33 | set to zero. |
---|
34 | cfree(void* p); |
---|
35 | Equivalent to free(p). |
---|
36 | malloc_trim(size_t pad); |
---|
37 | Release all but pad bytes of freed top-most memory back |
---|
38 | to the system. Return 1 if successful, else 0. |
---|
39 | malloc_usable_size(void* p); |
---|
40 | Report the number of usable allocated bytes associated with the allocated |
---|
41 | chunk p. This may or may not report more bytes than were requested, |
---|
42 | due to alignment and minimum size constraints. |
---|
43 | malloc_stats(); |
---|
44 | Prints brief summary statistics on stderr. |
---|
45 | mallinfo() |
---|
46 | Returns (by copy) a struct containing various summary statistics. |
---|
47 | mallopt(int parameter_number, int parameter_value) |
---|
48 | Changes one of the tunable parameters described below. Returns |
---|
49 | 1 if successful in changing the parameter, else 0. Actually, returns 0 |
---|
50 | always, as no parameter can be changed. |
---|
51 | */ |
---|
52 | |
---|
/* On xstormy16 the heap grows downward, away from the data section.  */
#ifdef __xstormy16__
#define MALLOC_DIRECTION -1
#endif

/* MALLOC_DIRECTION is +1 when the heap grows toward higher addresses
   (the usual case) and -1 when it grows toward lower addresses.  */
#ifndef MALLOC_DIRECTION
#define MALLOC_DIRECTION 1
#endif
---|
60 | |
---|
#include <stddef.h>
#include <string.h>
---|
62 | |
---|
/* Forward declarations of the public entry points.  Each function body
   below is guarded by a DEFINE_* macro so that one object file per
   routine can be built from this single source.  */
void* malloc(size_t);
void free(void*);
void* realloc(void*, size_t);
void* memalign(size_t, size_t);
void* valloc(size_t);
void* pvalloc(size_t);
void* calloc(size_t, size_t);
void cfree(void*);
int malloc_trim(size_t);
size_t malloc_usable_size(void*);
void malloc_stats(void);
int mallopt(int, int);
struct mallinfo mallinfo(void);
---|
76 | |
---|
/* Chunk header.  'size' is the chunk's total size in bytes, including
   this header word.  An allocated chunk hands &chunk->next to the user
   as the payload pointer; a free chunk reuses that payload slot to hold
   the link to the next free chunk.  */
typedef struct freelist_entry {
  size_t size;
  struct freelist_entry *next;
} *fle;

/* Frontier of memory claimed from the system so far (moves in the
   MALLOC_DIRECTION sense).  */
extern void * __malloc_end;
/* Singly-linked list of free chunks, kept in address order following
   the direction of heap growth.  */
extern fle __malloc_freelist;
---|
84 | |
---|
/* Return the number of bytes that need to be added to X to make it
   aligned to an ALIGN boundary.  ALIGN must be a power of 2.  */
#define M_ALIGN(x, align) (-(size_t)(x) & ((align) - 1))

/* Return the number of bytes that need to be subtracted from X to make it
   aligned to an ALIGN boundary.  ALIGN must be a power of 2.  */
#define M_ALIGN_SUB(x, align) ((size_t)(x) & ((align) - 1))

/* Linker-provided symbol marking the start of the heap region; only
   its address is meaningful, hence the (GCC extension) void type.  */
extern void __malloc_start;

/* This is the minimum gap allowed between __malloc_end and the top of
   the stack.  This is only checked for when __malloc_end is
   decreased; if instead the stack grows into the heap, silent data
   corruption will result.  */
#define MALLOC_MINIMUM_GAP 32

#ifdef __xstormy16__
register void * stack_pointer asm ("r15");
#define MALLOC_LIMIT stack_pointer
#else
#define MALLOC_LIMIT __builtin_frame_address (0)
#endif

/* Nonzero when REQUIRED more bytes fit in the gap between the heap
   frontier and the stack, keeping MALLOC_MINIMUM_GAP spare.  */
#if MALLOC_DIRECTION < 0
#define CAN_ALLOC_P(required)				\
  (((size_t) __malloc_end - (size_t)MALLOC_LIMIT	\
    - MALLOC_MINIMUM_GAP) >= (required))
#else
#define CAN_ALLOC_P(required)				\
  (((size_t)MALLOC_LIMIT - (size_t) __malloc_end	\
    - MALLOC_MINIMUM_GAP) >= (required))
#endif

/* real_size is the size we actually have to allocate, allowing for
   overhead and alignment: the size_t header plus padding up to the
   next size_t boundary, but never less than a whole freelist entry so
   a freed chunk can always hold its link.  Every use of SZ is
   parenthesized so expressions (e.g. conditionals) expand safely.  */
#define REAL_SIZE(sz)						\
  ((sz) < sizeof (struct freelist_entry) - sizeof (size_t)	\
   ? sizeof (struct freelist_entry)				\
   : (sz) + sizeof (size_t) + M_ALIGN((sz), sizeof (size_t)))
---|
124 | |
---|
125 | #ifdef DEFINE_MALLOC |
---|
126 | |
---|
/* Heap frontier: everything between &__malloc_start and __malloc_end
   (in the MALLOC_DIRECTION sense) has been claimed so far.  */
void * __malloc_end = &__malloc_start;
/* Address-ordered list of free chunks; NULL when empty.  */
fle __malloc_freelist;

/* Allocate at least SZ bytes, or return NULL if the gap between the
   heap frontier and the stack would shrink below MALLOC_MINIMUM_GAP.
   First-fit over the freelist; falls back to growing the heap.  */
void *
malloc (size_t sz)
{
  fle *nextfree;  /* Slot holding the link to the candidate chunk.  */
  fle block;

  /* real_size is the size we actually have to allocate, allowing for
     overhead and alignment.  */
  size_t real_size = REAL_SIZE (sz);

  /* Look for the first block on the freelist that is large enough.  */
  for (nextfree = &__malloc_freelist;
       *nextfree;
       nextfree = &(*nextfree)->next)
    {
      block = *nextfree;

      if (block->size >= real_size)
	{
	  /* If the block found is just the right size, remove it from
	     the free list.  Otherwise, split it.  */
	  if (block->size < real_size + sizeof (struct freelist_entry))
	    {
	      *nextfree = block->next;
	      return (void *)&block->next;
	    }
	  else
	    {
	      /* Carve the request off the front; the remainder stays
		 on the freelist in the same link slot.  */
	      size_t newsize = block->size - real_size;
	      fle newnext = block->next;
	      *nextfree = (fle)((size_t)block + real_size);
	      (*nextfree)->size = newsize;
	      (*nextfree)->next = newnext;
	      goto done;
	    }
	}

      /* If this is the last block on the freelist, and it was too small,
	 enlarge it.  NOTE(review): the frontier test below matches an
	 upward-growing heap; for MALLOC_DIRECTION < 0 it appears never
	 to hold (the chunk at the frontier starts at __malloc_end), so
	 downward heaps always fall through to fresh allocation --
	 confirm whether that is intended.  */
      if (! block->next
	  && __malloc_end == (void *)((size_t)block + block->size))
	{
	  size_t moresize = real_size - block->size;
	  if (! CAN_ALLOC_P (moresize))
	    return NULL;

	  /* The chunk leaves the freelist and becomes the result.  */
	  *nextfree = NULL;
	  if (MALLOC_DIRECTION < 0)
	    {
	      block = __malloc_end = (void *)((size_t)block - moresize);
	    }
	  else
	    {
	      __malloc_end = (void *)((size_t)block + real_size);
	    }

	  goto done;
	}
    }

  /* No free space at the end of the free list.  Allocate new space
     and use that.  */

  if (! CAN_ALLOC_P (real_size))
    return NULL;

  if (MALLOC_DIRECTION > 0)
    {
      block = __malloc_end;
      __malloc_end = (void *)((size_t)__malloc_end + real_size);
    }
  else
    {
      block = __malloc_end = (void *)((size_t)__malloc_end - real_size);
    }
 done:
  block->size = real_size;
  return (void *)&block->next;
}
---|
209 | |
---|
210 | #endif |
---|
211 | |
---|
212 | #ifdef DEFINE_FREE |
---|
213 | |
---|
/* Return the chunk whose payload is BLOCK_P to the freelist, coalescing
   with an adjacent free chunk on either side when possible.  The list
   is kept in address order (ascending for MALLOC_DIRECTION > 0,
   descending otherwise).  No effect when BLOCK_P is NULL.  */
void
free (void *block_p)
{
  fle *nextfree;
  /* Recover the chunk header from the payload pointer (pure pointer
     arithmetic; nothing is dereferenced before the NULL check).  */
  fle block = (fle)((size_t) block_p - offsetof (struct freelist_entry, next));

  if (block_p == NULL)
    return;

  /* Look on the freelist to see if there's a free block just before
     or just after this block.  */
  for (nextfree = &__malloc_freelist;
       *nextfree;
       nextfree = &(*nextfree)->next)
    {
      fle thisblock = *nextfree;
      if ((size_t)thisblock + thisblock->size == (size_t) block)
	{
	  /* THISBLOCK ends exactly where BLOCK begins: absorb BLOCK
	     into it, then try to also absorb the following free chunk
	     (three-way merge, upward-growing case).  */
	  thisblock->size += block->size;
	  if (MALLOC_DIRECTION > 0
	      && thisblock->next
	      && (size_t) block + block->size == (size_t) thisblock->next)
	    {
	      thisblock->size += thisblock->next->size;
	      thisblock->next = thisblock->next->next;
	    }
	  return;
	}
      else if ((size_t) thisblock == (size_t) block + block->size)
	{
	  /* BLOCK ends exactly where THISBLOCK begins.  */
	  if (MALLOC_DIRECTION < 0
	      && thisblock->next
	      && (size_t) block == ((size_t) thisblock->next
				    + thisblock->next->size))
	    {
	      /* Downward-growing three-way merge: fold both BLOCK and
		 THISBLOCK into the lower neighbour.  */
	      *nextfree = thisblock->next;
	      thisblock->next->size += block->size + thisblock->size;
	    }
	  else
	    {
	      /* BLOCK becomes the head of the merged chunk, replacing
		 THISBLOCK in the list.  */
	      block->size += thisblock->size;
	      block->next = thisblock->next;
	      *nextfree = block;
	    }
	  return;
	}
      else if ((MALLOC_DIRECTION > 0
		&& (size_t) thisblock > (size_t) block)
	       || (MALLOC_DIRECTION < 0
		   && (size_t) thisblock < (size_t) block))
	/* Passed the insertion point without finding a neighbour.  */
	break;
    }

  /* No coalescing possible: link BLOCK in at the current position to
     preserve address order.  */
  block->next = *nextfree;
  *nextfree = block;
  return;
}
---|
271 | #endif |
---|
272 | |
---|
273 | #ifdef DEFINE_REALLOC |
---|
274 | void * |
---|
275 | realloc (void *block_p, size_t sz) |
---|
276 | { |
---|
277 | fle block = (fle)((size_t) block_p - offsetof (struct freelist_entry, next)); |
---|
278 | size_t real_size = REAL_SIZE (sz); |
---|
279 | size_t old_real_size; |
---|
280 | |
---|
281 | if (block_p == NULL) |
---|
282 | return malloc (sz); |
---|
283 | |
---|
284 | old_real_size = block->size; |
---|
285 | |
---|
286 | /* Perhaps we need to allocate more space. */ |
---|
287 | if (old_real_size < real_size) |
---|
288 | { |
---|
289 | void *result; |
---|
290 | size_t old_size = old_real_size - sizeof (size_t); |
---|
291 | |
---|
292 | /* Need to allocate, copy, and free. */ |
---|
293 | result = malloc (sz); |
---|
294 | if (result == NULL) |
---|
295 | return NULL; |
---|
296 | memcpy (result, block_p, old_size < sz ? old_size : sz); |
---|
297 | free (block_p); |
---|
298 | return result; |
---|
299 | } |
---|
300 | /* Perhaps we can free some space. */ |
---|
301 | if (old_real_size - real_size >= sizeof (struct freelist_entry)) |
---|
302 | { |
---|
303 | fle newblock = (fle)((size_t)block + real_size); |
---|
304 | block->size = real_size; |
---|
305 | newblock->size = old_real_size - real_size; |
---|
306 | free (&newblock->next); |
---|
307 | } |
---|
308 | return block_p; |
---|
309 | } |
---|
310 | #endif |
---|
311 | |
---|
312 | #ifdef DEFINE_CALLOC |
---|
/* Allocate zero-initialized space for N objects of ELEM_SIZE bytes
   each.  Returns NULL on allocation failure, or -- unlike the original
   code, which silently wrapped -- when N * ELEM_SIZE overflows size_t
   and would have produced an undersized block.  */
void *
calloc (size_t n, size_t elem_size)
{
  void *result;
  size_t sz;

  /* Reject requests whose total size does not fit in a size_t.  */
  if (elem_size != 0 && n > (size_t)-1 / elem_size)
    return NULL;

  sz = n * elem_size;
  result = malloc (sz);
  if (result != NULL)
    memset (result, 0, sz);
  return result;
}
---|
323 | #endif |
---|
324 | |
---|
325 | #ifdef DEFINE_CFREE |
---|
/* Historical alias for free, kept for backward compatibility.  */
void
cfree (void *p)
{
  free (p);
}
---|
331 | #endif |
---|
332 | |
---|
333 | #ifdef DEFINE_MEMALIGN |
---|
/* Allocate SZ bytes whose payload address is a multiple of ALIGN
   (which must be a power of two).  Returns NULL on bad ALIGN or when
   no memory is available.  The result is always a pointer that free
   accepts, i.e. there is a size_t header immediately before it.  */
void *
memalign (size_t align, size_t sz)
{
  fle *nextfree;
  fle block;

  /* real_size is the size we actually have to allocate, allowing for
     overhead and alignment.  */
  size_t real_size = REAL_SIZE (sz);

  /* Some sanity checking on 'align': must be a nonzero power of two.  */
  if ((align & (align - 1)) != 0
      || align <= 0)
    return NULL;

  /* Look for the first block on the freelist that is large enough.  */
  /* One tricky part is this: We want the result to be a valid pointer
     to free.  That means that there has to be room for a size_t
     before the block.  If there's additional space before the block,
     it should go on the freelist, or it'll be lost---we could add it
     to the size of the block before it in memory, but finding the
     previous block is expensive.  */
  for (nextfree = &__malloc_freelist;
       ;
       nextfree = &(*nextfree)->next)
    {
      size_t before_size;  /* Bytes to split off ahead of the result.  */
      size_t old_size;     /* Total size of the candidate chunk.  */

      /* If we've run out of free blocks, allocate more space.  The new
	 frontier is positioned so the payload lands on an ALIGN
	 boundary, and the chunk is linked in so the common code below
	 can split it.  */
      if (! *nextfree)
	{
	  old_size = real_size;
	  if (MALLOC_DIRECTION < 0)
	    {
	      old_size += M_ALIGN_SUB (((size_t)__malloc_end
					- old_size + sizeof (size_t)),
				       align);
	      if (! CAN_ALLOC_P (old_size))
		return NULL;
	      block = __malloc_end = (void *)((size_t)__malloc_end - old_size);
	    }
	  else
	    {
	      block = __malloc_end;
	      old_size += M_ALIGN ((size_t)__malloc_end + sizeof (size_t),
				   align);
	      if (! CAN_ALLOC_P (old_size))
		return NULL;
	      __malloc_end = (void *)((size_t)__malloc_end + old_size);
	    }
	  *nextfree = block;
	  block->size = old_size;
	  block->next = NULL;
	}
      else
	{
	  block = *nextfree;
	  old_size = block->size;
	}


      /* BEFORE_SIZE is how much must come off the front of this chunk
	 so the payload is aligned; if any padding is needed at all, it
	 must be at least a whole freelist entry (so the cut-off piece
	 can be freed), re-aligned past that entry.  */
      before_size = M_ALIGN (&block->next, align);
      if (before_size != 0)
	before_size = sizeof (*block) + M_ALIGN (&(block+1)->next, align);

      /* If this is the last block on the freelist, and it is too small,
	 enlarge it.  NOTE(review): as in malloc, the frontier test
	 matches upward growth; confirm it can ever hold when
	 MALLOC_DIRECTION < 0.  */
      if (! block->next
	  && old_size < real_size + before_size
	  && __malloc_end == (void *)((size_t)block + block->size))
	{
	  if (MALLOC_DIRECTION < 0)
	    {
	      size_t moresize = real_size - block->size;
	      moresize += M_ALIGN_SUB ((size_t)&block->next - moresize, align);
	      if (! CAN_ALLOC_P (moresize))
		return NULL;
	      block = __malloc_end = (void *)((size_t)block - moresize);
	      block->next = NULL;
	      block->size = old_size = old_size + moresize;
	      before_size = 0;
	    }
	  else
	    {
	      if (! CAN_ALLOC_P (before_size + real_size - block->size))
		return NULL;
	      __malloc_end = (void *)((size_t)block + before_size + real_size);
	      block->size = old_size = before_size + real_size;
	    }

	  /* Two out of the four cases below will now be possible; which
	     two depends on MALLOC_DIRECTION.  */
	}

      if (old_size >= real_size + before_size)
	{
	  /* This block will do.  If there needs to be space before it,
	     split the block.  */
	  if (before_size != 0)
	    {
	      fle old_block = block;

	      old_block->size = before_size;
	      block = (fle)((size_t)block + before_size);

	      /* If there's no space after the block, we're now nearly
		 done; just make a note of the size required.
		 Otherwise, we need to create a new free space block.  */
	      if (old_size - before_size
		  <= real_size + sizeof (struct freelist_entry))
		{
		  block->size = old_size - before_size;
		  return (void *)&block->next;
		}
	      else
		{
		  fle new_block;
		  new_block = (fle)((size_t)block + real_size);
		  new_block->size = old_size - before_size - real_size;
		  if (MALLOC_DIRECTION > 0)
		    {
		      /* Ascending order: tail goes after the padding
			 chunk that keeps this list slot.  */
		      new_block->next = old_block->next;
		      old_block->next = new_block;
		    }
		  else
		    {
		      /* Descending order: tail precedes the padding
			 chunk in the list.  */
		      new_block->next = old_block;
		      *nextfree = new_block;
		    }
		  goto done;
		}
	    }
	  else
	    {
	      /* If the block found is just the right size, remove it from
		 the free list.  Otherwise, split it.  */
	      if (old_size <= real_size + sizeof (struct freelist_entry))
		{
		  *nextfree = block->next;
		  return (void *)&block->next;
		}
	      else
		{
		  size_t newsize = old_size - real_size;
		  fle newnext = block->next;
		  *nextfree = (fle)((size_t)block + real_size);
		  (*nextfree)->size = newsize;
		  (*nextfree)->next = newnext;
		  goto done;
		}
	    }
	}
    }

 done:
  block->size = real_size;
  return (void *)&block->next;
}
---|
493 | #endif |
---|
494 | |
---|
495 | #ifdef DEFINE_VALLOC |
---|
/* Allocate SZ bytes on a 128-byte boundary.  NOTE(review): 128 here
   stands in for a "page" on these small targets -- confirm before
   reusing on a target with real MMU pages.  */
void *
valloc (size_t sz)
{
  return memalign (128, sz);
}
---|
501 | #endif |
---|
502 | #ifdef DEFINE_PVALLOC |
---|
/* Like valloc, but with the size rounded up to a multiple of the
   128-byte "page" (M_ALIGN supplies the padding to the next multiple).  */
void *
pvalloc (size_t sz)
{
  return memalign (128, sz + M_ALIGN (sz, 128));
}
---|
508 | #endif |
---|
509 | |
---|
510 | #ifdef DEFINE_MALLINFO |
---|
511 | #include "malloc.h" |
---|
512 | |
---|
/* Return summary statistics: total arena size, free/used byte counts,
   number of free chunks, and the size of the free chunk (if any)
   touching the heap frontier (keepcost).  All other struct mallinfo
   fields are left zero.  */
struct mallinfo
mallinfo (void)
{
  struct mallinfo r;
  fle fr;
  size_t free_size;
  size_t total_size;
  size_t free_blocks;

  memset (&r, 0, sizeof (r));

  /* Walk the freelist, totalling free space and counting chunks.  */
  free_size = 0;
  free_blocks = 0;
  for (fr = __malloc_freelist; fr; fr = fr->next)
    {
      free_size += fr->size;
      free_blocks++;
      if (! fr->next)
	{
	  /* The last free chunk is 'trimmable' only if it abuts the
	     frontier; where the frontier sits relative to the chunk
	     depends on the growth direction.  */
	  int atend;
	  if (MALLOC_DIRECTION > 0)
	    atend = (void *)((size_t)fr + fr->size) == __malloc_end;
	  else
	    atend = (void *)fr == __malloc_end;
	  if (atend)
	    r.keepcost = fr->size;
	}
    }

  /* Arena size is the distance from the fixed heap start to the moving
     frontier, measured in the direction of growth.  */
  if (MALLOC_DIRECTION > 0)
    total_size = (char *)__malloc_end - (char *)&__malloc_start;
  else
    total_size = (char *)&__malloc_start - (char *)__malloc_end;

#ifdef DEBUG
  /* Fixme: should walk through all the in-use blocks and see if
     they're valid.  */
#endif

  r.arena = total_size;
  r.fordblks = free_size;
  r.uordblks = total_size - free_size;
  r.ordblks = free_blocks;
  return r;
}
---|
558 | #endif |
---|
559 | |
---|
560 | #ifdef DEFINE_MALLOC_STATS |
---|
561 | #include "malloc.h" |
---|
562 | #include <stdio.h> |
---|
563 | |
---|
/* Print a short human-readable summary of allocator state to stderr,
   using the counters gathered by mallinfo.  */
void
malloc_stats(void)
{
  struct mallinfo i;
  FILE *fp;

  fp = stderr;
  i = mallinfo();
  fprintf (fp, "malloc has reserved %u bytes between %p and %p\n",
	   i.arena, &__malloc_start, __malloc_end);
  fprintf (fp, "there are %u bytes free in %u chunks\n",
	   i.fordblks, i.ordblks);
  fprintf (fp, "of which %u bytes are at the end of the reserved space\n",
	   i.keepcost);
  fprintf (fp, "and %u bytes are in use.\n", i.uordblks);
}
---|
580 | #endif |
---|
581 | |
---|
582 | #ifdef DEFINE_MALLOC_USABLE_SIZE |
---|
583 | size_t |
---|
584 | malloc_usable_size (void *block_p) |
---|
585 | { |
---|
586 | fle block = (fle)((size_t) block_p - offsetof (struct freelist_entry, next)); |
---|
587 | return block->size - sizeof (size_t); |
---|
588 | } |
---|
589 | #endif |
---|
590 | |
---|
591 | #ifdef DEFINE_MALLOPT |
---|
/* This allocator has no tunable parameters, so every mallopt request
   reports failure (0), as documented in the file header.  */
int
mallopt (int n, int v)
{
  (void)n;
  (void)v;
  return 0;
}
---|
598 | #endif |
---|