@H4l0 2019-06-16

glibc Code Reading

Source code analysis


Preface

This post walks through the code of the free path. Whenever the analysis hits a spot that can be abused in pwn, I will sketch the exploitation idea.

free

The call chain behind free:

```
free -> __libc_free -> _int_free
```

The __libc_free function

Full listing:

```c
void
__libc_free (void *mem)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *)
    = atomic_forced_read (__free_hook);
  if (__builtin_expect (hook != NULL, 0))
    {
      (*hook)(mem, RETURN_ADDRESS (0));
      return;
    }

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk (mem);

  if (chunk_is_mmapped (p))                  /* release mmapped memory. */
    {
      /* See if the dynamic brk/mmap threshold needs adjusting.
         Dumped fake mmapped chunks do not affect the threshold.  */
      if (!mp_.no_dyn_threshold
          && chunksize_nomask (p) > mp_.mmap_threshold
          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
          && !DUMPED_MAIN_ARENA_CHUNK (p))
        {
          mp_.mmap_threshold = chunksize (p);
          mp_.trim_threshold = 2 * mp_.mmap_threshold;
          LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
                      mp_.mmap_threshold, mp_.trim_threshold);
        }
      munmap_chunk (p);
      return;
    }

  MAYBE_INIT_TCACHE ();

  ar_ptr = arena_for_chunk (p);
  _int_free (ar_ptr, p, 0);
}
```

Parameters

```c
void *mem   /* generic pointer to the user memory area */
```

Processing steps

1 . Two local variables are declared:

```c
mstate ar_ptr;   /* pointer to a malloc_state structure, i.e. the arena */
mchunkptr p;     /* pointer to the chunk corresponding to mem */
```

Definition of malloc_state:

```c
typedef struct malloc_state *mstate;

struct malloc_state
{
  /* Serialize access.  */
  __libc_lock_define (, mutex);

  /* Flags (formerly in max_fast).  */
  int flags;

  /* Set if the fastbin chunks contain recently inserted free blocks.  */
  /* Note this is a bool but not all targets support atomics on booleans.  */
  int have_fastchunks;

  /* Fastbins */
  mfastbinptr fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};
```

mchunkptr is a pointer to a malloc_chunk structure:

```c
typedef struct malloc_chunk* mchunkptr;

struct malloc_chunk {
  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;                /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize;       /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
```
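To make the layout concrete, here is a minimal sketch (not glibc code; it mirrors the struct above and assumes INTERNAL_SIZE_T is size_t, which is the glibc default) that prints the field offsets on a 64-bit build:

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-in definition mirroring the glibc struct above.  */
typedef size_t INTERNAL_SIZE_T;

struct malloc_chunk {
  INTERNAL_SIZE_T      mchunk_prev_size;
  INTERNAL_SIZE_T      mchunk_size;
  struct malloc_chunk *fd;
  struct malloc_chunk *bk;
  struct malloc_chunk *fd_nextsize;
  struct malloc_chunk *bk_nextsize;
};

int main (void)
{
  /* On x86-64: prev_size at 0, size at 8, fd at 16.  For an allocated
     chunk everything from fd onwards is user data -- the link fields
     only exist while the chunk is free.  */
  printf ("prev_size @ %zu\n", offsetof (struct malloc_chunk, mchunk_prev_size));
  printf ("size      @ %zu\n", offsetof (struct malloc_chunk, mchunk_size));
  printf ("fd        @ %zu\n", offsetof (struct malloc_chunk, fd));
  printf ("bk        @ %zu\n", offsetof (struct malloc_chunk, bk));
  return 0;
}
```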

2 . Declare a function pointer hook and load the value of __free_hook into it.

```c
void (*hook) (void *, const void *)
  = atomic_forced_read (__free_hook);
if (__builtin_expect (hook != NULL, 0))
  {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }
```

If hook is non-NULL, it is called with the mem pointer and the caller's return address, and free returns immediately. RETURN_ADDRESS is defined as:

```c
#define RETURN_ADDRESS(nr) \
  __builtin_extract_return_addr (__builtin_return_address (nr))
```

__free_hook defaults to NULL. It is likewise defined in malloc.c:

```c
void weak_variable (*__free_hook) (void *__ptr, const void *) = NULL;
```
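This hook is exactly why pwn players love free: whoever controls __free_hook controls the first thing free() does. A minimal legitimate demo of the mechanism (the symbol is real but was removed in glibc 2.34, so this only builds and runs against older glibc):

```c
#include <stdio.h>
#include <stdlib.h>

/* __free_hook is a real glibc symbol (removed in glibc 2.34).  */
extern void (*__free_hook) (void *ptr, const void *caller);

static void my_hook (void *ptr, const void *caller)
{
  /* free() returns right after calling the hook, so the chunk is
     NOT actually released here.  */
  printf ("free(%p) called from %p\n", ptr, caller);
}

int main (void)
{
  void *p = malloc (0x20);
  __free_hook = my_hook;   /* in an exploit this write comes from a heap bug */
  free (p);                /* dispatches to my_hook instead of _int_free */
  __free_hook = NULL;
  free (p);                /* now really freed */
  return 0;
}
```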

3 . Check whether mem is a null pointer: free(0) has no effect. Then mem2chunk converts the user pointer mem into the pointer to its chunk, stored in p.

```c
if (mem == 0)                              /* free(0) has no effect */
  return;

p = mem2chunk (mem);
```

Definition of mem2chunk:

```c
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
```

The definition is clear enough: take the mem pointer (a void *), cast it to a char * (so the arithmetic is in 1-byte units), and subtract the combined size of the prev_size and size fields (2 * SIZE_SZ). The result points at the start of the current chunk, which is exactly the mem-to-chunk relationship in the layout below.

```
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |       Size of previous chunk, if unallocated (P clear)        |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |       Size of chunk, in bytes                           |A|M|P|
  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
```
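The same conversion written out as ordinary pointer arithmetic, in a sketch that assumes a 64-bit build where SIZE_SZ == sizeof(size_t) == 8:

```c
#include <stdio.h>
#include <stdlib.h>

#define SIZE_SZ (sizeof (size_t))          /* 8 on x86-64 */
#define mem2chunk(mem) ((void *) ((char *) (mem) - 2 * SIZE_SZ))

int main (void)
{
  void *mem = malloc (0x18);
  void *chunk = mem2chunk (mem);

  /* The size field sits one SIZE_SZ before mem; its low bits are the
     A/M/P flags from the diagram above.  */
  size_t size_field = *(size_t *) ((char *) mem - SIZE_SZ);
  printf ("mem=%p chunk=%p size|flags=%#zx\n", mem, chunk, size_field);

  free (mem);
  return 0;
}
```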

4 . Check whether p is a memory region mapped in by mmap

```c
if (chunk_is_mmapped (p))                  /* release mmapped memory. */
  {
    /* See if the dynamic brk/mmap threshold needs adjusting.
       Dumped fake mmapped chunks do not affect the threshold.  */
    if (!mp_.no_dyn_threshold
        && chunksize_nomask (p) > mp_.mmap_threshold
        && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
        && !DUMPED_MAIN_ARENA_CHUNK (p))
      {
        mp_.mmap_threshold = chunksize (p);
        mp_.trim_threshold = 2 * mp_.mmap_threshold;
        LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
                    mp_.mmap_threshold, mp_.trim_threshold);
      }
    munmap_chunk (p);
    return;
  }
```

If the M bit in the size field indicates the memory really came from mmap, then after a round of checks munmap_chunk is called to release it.

```c
#define IS_MMAPPED 0x2   /* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
```

Through the structure pointer p we read the size field and bitwise-AND it with 0x2: a non-zero result means the M flag is set, i.e. the chunk was allocated via mmap.
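A quick experiment to watch the M bit in action: a request far above the default 128 KB mmap threshold is served by mmap and carries M = 1, while a small heap chunk does not (the raw size-field read below is an illustration, not a glibc API):

```c
#include <stdio.h>
#include <stdlib.h>

#define IS_MMAPPED 0x2

int main (void)
{
  void *big   = malloc (1024 * 1024);   /* above the mmap threshold */
  void *small = malloc (0x20);          /* ordinary heap chunk */

  /* mchunk_size lives one size_t before the user pointer (64-bit).  */
  size_t big_sz   = ((size_t *) big)[-1];
  size_t small_sz = ((size_t *) small)[-1];

  printf ("big:   size|flags=%#zx M=%d\n", big_sz, !!(big_sz & IS_MMAPPED));
  printf ("small: size|flags=%#zx M=%d\n", small_sz, !!(small_sz & IS_MMAPPED));

  free (big);
  free (small);
  return 0;
}
```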

The conditions of the dynamic-threshold if involve too many other pieces, so I will skip them for now.

Stepping into munmap_chunk

Going straight to the definition of munmap_chunk:

```c
static void
munmap_chunk (mchunkptr p)
{
  size_t pagesize = GLRO (dl_pagesize);
  INTERNAL_SIZE_T size = chunksize (p);

  assert (chunk_is_mmapped (p));

  /* Do nothing if the chunk is a faked mmapped chunk in the dumped
     main arena.  We never free this memory.  */
  if (DUMPED_MAIN_ARENA_CHUNK (p))
    return;

  uintptr_t mem = (uintptr_t) chunk2mem (p);
  uintptr_t block = (uintptr_t) p - prev_size (p);
  size_t total_size = prev_size (p) + size;
  /* Unfortunately we have to do the compilers job by hand here.  Normally
     we would test BLOCK and TOTAL-SIZE separately for compliance with the
     page size.  But gcc does not recognize the optimization possibility
     (in the moment at least) so we combine the two values into one before
     the bit test.  */
  if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
      || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
    malloc_printerr ("munmap_chunk(): invalid pointer");

  atomic_decrement (&mp_.n_mmaps);
  atomic_add (&mp_.mmapped_mem, -total_size);

  /* If munmap failed the process virtual memory address space is in a
     bad shape.  Just leave the block hanging around, the process will
     terminate shortly anyway since not much can be done.  */
  __munmap ((char *) block, total_size);
}
```

It likewise starts with two definitions, fetching the page size and the chunk's size:

```c
size_t pagesize = GLRO (dl_pagesize);
INTERNAL_SIZE_T size = chunksize (p);
```

Next it computes the chunk's mem pointer, the real start of the mapping, and the total mapping size. Note that for an mmapped chunk prev_size does not describe a neighbouring chunk: it stores the number of leading padding bytes of the mapping, so block = p - prev_size(p) is the page-aligned start of the mapping and total_size is its full length. No chunk merging is going on here.

```c
uintptr_t mem = (uintptr_t) chunk2mem (p);
uintptr_t block = (uintptr_t) p - prev_size (p);
size_t total_size = prev_size (p) + size;

/* for reference: */
typedef unsigned long int uintptr_t;
```

Validity check:

```c
if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
    || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
  malloc_printerr ("munmap_chunk(): invalid pointer");

/* where */
# define __glibc_unlikely(cond) __builtin_expect ((cond), 0)
```
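In plain terms: block and total_size must both be page-aligned (their OR has no bits below the page size), and mem's offset inside its page must be zero or a power of two (for a normal mmapped chunk it is 2*SIZE_SZ = 16). A standalone sketch of the same test, with powerof2 borrowed from sys/param.h and hypothetical example values:

```c
#include <stdint.h>
#include <stdio.h>

#define powerof2(x) ((((x) - 1) & (x)) == 0)   /* as in sys/param.h */

int main (void)
{
  size_t pagesize = 4096;
  uintptr_t block = 0x7f0000001000;   /* hypothetical mapping start */
  size_t total_size = 0x3000;         /* hypothetical mapping length */
  uintptr_t mem = block + 16;         /* chunk header is 2*SIZE_SZ bytes */

  if (((block | total_size) & (pagesize - 1)) != 0
      || !powerof2 (mem & (pagesize - 1)))
    puts ("munmap_chunk(): invalid pointer");
  else
    puts ("pointer looks like a valid mmapped chunk");
  return 0;
}
```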

Then the mmap bookkeeping is updated: the counter of live mmaps is decremented and total_size is subtracted from the mmapped-memory statistics, as the function names suggest.

```c
atomic_decrement (&mp_.n_mmaps);
atomic_add (&mp_.mmapped_mem, -total_size);
```

Finally the mapping is actually released with __munmap. The definition quoted below is the generic ENOSYS stub for ports without munmap; on Linux __munmap issues the munmap system call. If it fails, the block is simply left hanging, as the source comment explains.

```c
__munmap ((char *) block, total_size);

/* generic fallback stub; Linux provides the real syscall wrapper */
int
__munmap (void *addr, size_t len)
{
  __set_errno (ENOSYS);
  return -1;
}
```

So the mmap statistics are maintained by atomic_decrement / atomic_add, and the memory itself is returned to the kernel by __munmap.

5 . Back in __libc_free, we continue downward.

The tcache machinery is out of scope for now:

```c
MAYBE_INIT_TCACHE ();   /* if tcache is enabled and not yet set up, tcache_init() is called */
```

Check whether the chunk belongs to the main arena: if so, take the address of main_arena; otherwise heap_for_ptr locates the chunk's heap_info, whose ar_ptr field points at the owning arena.

```c
ar_ptr = arena_for_chunk (p);

#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
```
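chunk_main_arena simply tests the A bit. For non-main arenas, heap_for_ptr relies on sub-heaps being aligned to HEAP_MAX_SIZE (64 MB on 64-bit): masking off the low bits of any chunk address yields the heap_info header at the start of the sub-heap. A sketch of that masking trick (the address is hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

#define HEAP_MAX_SIZE (64 * 1024 * 1024)   /* 64-bit default */

/* Same idea as glibc's heap_for_ptr(): clearing the low bits of a
   chunk address inside a sub-heap yields the heap_info header at
   its HEAP_MAX_SIZE-aligned start.  */
static uintptr_t heap_base_for (uintptr_t chunk_addr)
{
  return chunk_addr & ~((uintptr_t) HEAP_MAX_SIZE - 1);
}

int main (void)
{
  uintptr_t chunk = 0x7f3a48001230;   /* hypothetical thread-arena chunk */
  printf ("heap_info at %#lx\n", (unsigned long) heap_base_for (chunk));
  return 0;
}
```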

Finally ar_ptr and p are handed over to _int_free:

```c
_int_free (ar_ptr, p, 0);
```

Recap

That wraps up __libc_free. To summarise what it does:
1. Check whether __free_hook is set (in pwn, the classic trigger: write a one_gadget address there).
2. Check whether the chunk is an mmapped region; if so, hand it straight to munmap_chunk.
3. Run assorted sanity checks, then pass the chunk on to _int_free.

The _int_free function

Full listing:

```c
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */
  mchunkptr nextchunk;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int nextinuse;               /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr bck;               /* misc temp for linking */
  mchunkptr fwd;               /* misc temp for linking */

  size = chunksize (p);

  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    malloc_printerr ("free(): invalid pointer");
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    malloc_printerr ("free(): invalid size");

  check_inuse_chunk(av, p);

  ......

  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */

  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())

#if TRIM_FASTBINS
      /*
        If TRIM_FASTBINS set, don't place chunks
        bordering top into fastbins
      */
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {

    if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
                          <= 2 * SIZE_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size))
                             >= av->system_mem, 0))
      {
        bool fail = true;
        /* We might not have a lock at this point and concurrent modifications
           of system_mem might result in a false positive.  Redo the test after
           getting the lock.  */
        if (!have_lock)
          {
            __libc_lock_lock (av->mutex);
            fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
                    || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
            __libc_lock_unlock (av->mutex);
          }

        if (fail)
          malloc_printerr ("free(): invalid next size (fast)");
      }

    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);

    atomic_store_relaxed (&av->have_fastchunks, true);
    unsigned int idx = fastbin_index(size);
    fb = &fastbin (av, idx);

    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;

    if (SINGLE_THREAD_P)
      {
        /* Check that the top of the bin is not the record we are going to
           add (i.e., double free).  */
        if (__builtin_expect (old == p, 0))
          malloc_printerr ("double free or corruption (fasttop)");
        p->fd = old;
        *fb = p;
      }
    else
      do
        {
          /* Check that the top of the bin is not the record we are going to
             add (i.e., double free).  */
          if (__builtin_expect (old == p, 0))
            malloc_printerr ("double free or corruption (fasttop)");
          p->fd = old2 = old;
        }
      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
             != old2);

    /* Check that size of fastbin chunk at the top is the same as
       size of the chunk that we are adding.  We can dereference OLD
       only if we have the lock, otherwise it might have already been
       allocated again.  */
    if (have_lock && old != NULL
        && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
      malloc_printerr ("invalid fastbin entry (free)");
  }

  /*
    Consolidate other non-mmapped chunks as they arrive.
  */

  else if (!chunk_is_mmapped(p)) {

    /* If we're single-threaded, don't lock the arena.  */
    if (SINGLE_THREAD_P)
      have_lock = true;

    if (!have_lock)
      __libc_lock_lock (av->mutex);

    nextchunk = chunk_at_offset(p, size);

    /* Lightweight tests: check whether the block is already the
       top block.  */
    if (__glibc_unlikely (p == av->top))
      malloc_printerr ("double free or corruption (top)");
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    if (__builtin_expect (contiguous (av)
                          && (char *) nextchunk
                          >= ((char *) av->top + chunksize(av->top)), 0))
      malloc_printerr ("double free or corruption (out)");
    /* Or whether the block is actually not marked used.  */
    if (__glibc_unlikely (!prev_inuse(nextchunk)))
      malloc_printerr ("double free or corruption (!prev)");

    nextsize = chunksize(nextchunk);
    if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
        || __builtin_expect (nextsize >= av->system_mem, 0))
      malloc_printerr ("free(): invalid next size (normal)");

    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);

    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = prev_size (p);
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      if (__glibc_unlikely (chunksize(p) != prevsize))
        malloc_printerr ("corrupted size vs. prev_size while consolidating");
      unlink_chunk (av, p);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
        unlink_chunk (av, nextchunk);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset(nextchunk, 0);

      /*
        Place the chunk in unsorted chunk list. Chunks are
        not placed into regular bins until after they have
        been given one chance to be used in malloc.
      */

      bck = unsorted_chunks(av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
        malloc_printerr ("free(): corrupted unsorted chunks");
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
        {
          p->fd_nextsize = NULL;
          p->bk_nextsize = NULL;
        }
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(av, p);
    }

    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */

    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }

    /*
      If freeing a large space, consolidate possibly-surrounding
      chunks. Then, if the total unused topmost memory exceeds trim
      threshold, ask malloc_trim to reduce top.

      Unless max_fast is 0, we don't know if there are fastbins
      bordering top, so we cannot tell for sure whether threshold
      has been reached unless fastbins are consolidated.  But we
      don't want to consolidate on each free.  As a compromise,
      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
      is reached.
    */

    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
      if (atomic_load_relaxed (&av->have_fastchunks))
        malloc_consolidate(av);

      if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
        if ((unsigned long)(chunksize(av->top)) >=
            (unsigned long)(mp_.trim_threshold))
          systrim(mp_.top_pad, av);
#endif
      } else {
        /* Always try heap_trim(), even if the top chunk is not
           large, because the corresponding heap might go away.  */
        heap_info *heap = heap_for_ptr(top(av));

        assert(heap->ar_ptr == av);
        heap_trim(heap, mp_.top_pad);
      }
    }

    if (!have_lock)
      __libc_lock_unlock (av->mutex);
  }
  /*
    If the chunk was allocated via mmap, release via munmap().
  */

  else {
    munmap_chunk (p);
  }
}
```

The code is on the long side (all the use_tcache branches have been stripped out above). Again, let's take it step by step.
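One pwn-relevant detail is already visible before we dive in: the fasttop check ("double free or corruption (fasttop)") only compares p against the current head of the fastbin. Freeing the same chunk twice in a row aborts, but interleaving one other free defeats the check. A sketch of the classic pattern (this assumes the chunks actually reach the fastbins, i.e. a glibc without tcache or with the tcache already filled):

```c
#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x20);   /* fastbin-sized request */
  void *b = malloc (0x20);

  free (a);   /* fastbin: a */
  /* free (a) again here would abort: a is the fastbin top.  */
  free (b);   /* fastbin: b -> a */
  free (a);   /* passes the check (top is b, not a); fastbin: a -> b -> a */

  /* malloc now returns a twice; writing through the first copy
     corrupts the fd of the still-"free" copy, steering a later
     malloc to an attacker-chosen address.  */
  return 0;
}
```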

Parameters and local variables

The parameters are the arena av, the chunk p, and a have_lock flag. The local variables below collect the basic bookkeeping about the chunk being freed.

```c
INTERNAL_SIZE_T size;        /* the chunk's size; INTERNAL_SIZE_T defaults to size_t */
mfastbinptr *fb;             /* associated fastbin; typedef struct malloc_chunk *mfastbinptr; */
mchunkptr nextchunk;         /* next contiguous chunk; typedef struct malloc_chunk *mchunkptr; */
INTERNAL_SIZE_T nextsize;    /* size of nextchunk */
int nextinuse;               /* whether nextchunk is in use */
INTERNAL_SIZE_T prevsize;    /* size of the previous contiguous chunk */
mchunkptr bck;               /* temp chunk pointer for list linking (bk side) */
mchunkptr fwd;               /* temp chunk pointer for list linking (fd side) */
```

Processing steps

1 . Get the size of the chunk being freed

chunksize strips the flag bits off the size field and yields the chunk's real size; a worked example of the masking follows the macros below.

```c
size = chunksize (p);
/* => */
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
/* => */
#define chunksize_nomask(p) ((p)->mchunk_size)
```
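A worked example of the masking, assuming a 64-bit build:

```c
#include <stdio.h>

#define PREV_INUSE     0x1
#define IS_MMAPPED     0x2
#define NON_MAIN_ARENA 0x4
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

int main (void)
{
  /* 0x95: real size 0x90, with A=1 and P=1 set in the low bits.  */
  size_t mchunk_size = 0x91 | NON_MAIN_ARENA;
  size_t size = mchunk_size & ~((size_t) SIZE_BITS);
  printf ("raw=%#zx real size=%#zx\n", mchunk_size, size);   /* prints 0x95 / 0x90 */
  return 0;
}
```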