A local copy of OpenSSL from GitHub

Revert the crypto "global lock" implementation

Conceptually, this is a squashed version of:

    Revert "Address feedback"
    This reverts commit 75551e07bd2339dfea06ef1d31d69929e13a4495.

and

    Revert "Add CRYPTO_thread_glock_new"
    This reverts commit ed6b2c7938ec6f07b15745d4183afc276e74c6dd.

But there were some intervening commits that made neither revert apply cleanly, so instead do it all as one shot.

The crypto global locks were an attempt to cope with the awkward POSIX semantics for pthread_atfork(); its documentation (the "RATIONALE" section) indicates that the expected usage is to have the prefork handler lock all "global" locks, and the parent and child handlers release those locks, to ensure that forking happens with a consistent (lock) state. However, the set of functions available in the child process is limited to async-signal-safe functions, and pthread_mutex_unlock() is not on the list of async-signal-safe functions! The only synchronization primitives that are async-signal-safe are the semaphore primitives, which are not really appropriate for general-purpose usage.

However, the state consistency problem that the global locks were attempting to solve is not actually a serious problem, particularly for OpenSSL. That is, we can consider four cases of forking application that might use OpenSSL:

(1) Single-threaded, does not call into OpenSSL in the child (e.g., the child calls exec() immediately). For this class of process, no locking is needed at all, since there is only ever a single thread of execution and the only reentrancy is due to signal handlers (which are themselves limited to async-signal-safe operation and should not be doing much work at all).

(2) Single-threaded, calls into OpenSSL after fork(). The application must ensure that it does not fork() with an unexpected lock held (that is, one that would get unlocked in the parent but accidentally remain locked in the child and cause deadlock). Since OpenSSL does not expose any of its internal locks to the application and the application is single-threaded, the OpenSSL internal locks will be unlocked for the fork(), and the state will be consistent. (OpenSSL will need to reseed its PRNG in the child, but that is an orthogonal issue.) If the application makes use of locks from libcrypto, proper handling for those locks is the responsibility of the application, as for any other locking primitive that is available for application programming.

(3) Multi-threaded, does not call into OpenSSL after fork(). As for (1), the OpenSSL state is only relevant in the parent, so no particular fork()-related handling is needed. The internal locks are relevant, but there is no interaction with the child to consider.

(4) Multi-threaded, calls into OpenSSL after fork(). This is the case where the pthread_atfork() hooks to ensure that all global locks are in a known state across fork() would come into play, per the above discussion. However, these "calls into OpenSSL after fork()" are still subject to the restriction to async-signal-safe functions. Since OpenSSL uses all sorts of locking and libc functions that are not on the list of safe functions (e.g., malloc()), this case is not currently usable and is unlikely to ever be usable, independently of the locking situation. So, there is no need to go through contortions to attempt to support this case in the one small area of locking interaction with fork().

In light of the above analysis (thanks @davidben and @achernya), go back to the simpler implementation that does not need to distinguish "library-global" locks or to have complicated atfork handling for locks.

Reviewed-by: Kurt Roeckx <kurt@roeckx.be>
Reviewed-by: Matthias St. Pierre <Matthias.St.Pierre@ncp-e.com>
(Merged from https://github.com/openssl/openssl/pull/5089)
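For reference, the pthread_atfork() usage pattern that the POSIX RATIONALE describes looks roughly like the sketch below (illustrative only, not code from this repository; the lock and handler names are made up). The snag discussed above shows up in the child handler: pthread_mutex_unlock() is not on the async-signal-safe list, so even this "textbook" pattern is not strictly conforming.

    #include <pthread.h>

    /* One representative "global" lock; a real library would have several. */
    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

    static void prepare_fork(void)
    {
        pthread_mutex_lock(&global_lock);    /* pre-fork: take every global lock */
    }

    static void parent_post_fork(void)
    {
        pthread_mutex_unlock(&global_lock);  /* parent: release them again */
    }

    static void child_post_fork(void)
    {
        /*
         * Only async-signal-safe calls are permitted in the child here,
         * and pthread_mutex_unlock() is not one of them.
         */
        pthread_mutex_unlock(&global_lock);
    }

    static void install_fork_handlers(void)
    {
        pthread_atfork(prepare_fork, parent_post_fork, child_post_fork);
    }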
/*
 * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
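/*
 * Illustrative usage sketch (example only, compiled out): applications
 * normally reach these functions through the OPENSSL_secure_malloc() /
 * OPENSSL_secure_free() convenience macros in <openssl/crypto.h>; the
 * arena and minimum-chunk sizes below are arbitrary example values.
 */
#if 0
static int secure_heap_example(void)
{
    unsigned char *key;

    /* 32 KB arena, 32-byte minimum chunks; both must be powers of two. */
    if (!CRYPTO_secure_malloc_init(32768, 32))
        return 0;

    key = CRYPTO_secure_malloc(32, __FILE__, __LINE__);
    if (key != NULL) {
        /* ... fill and use the 32 bytes of key material ... */
        CRYPTO_secure_clear_free(key, 32, __FILE__, __LINE__);
    }

    /* Succeeds (returns 1) only once every secure allocation is freed. */
    return CRYPTO_secure_malloc_done();
}
#endif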
#include "e_os.h"
#include <openssl/crypto.h>

#include <string.h>

#ifndef OPENSSL_NO_SECURE_MEMORY
# include <stdlib.h>
# include <assert.h>
# include <unistd.h>
# include <sys/types.h>
# include <sys/mman.h>
# if defined(OPENSSL_SYS_LINUX)
#  include <sys/syscall.h>
#  if defined(SYS_mlock2)
#   include <linux/mman.h>
#   include <errno.h>
#  endif
#  include <sys/param.h>
# endif
# include <sys/stat.h>
# include <fcntl.h>
#endif

#define CLEAR(p, s) OPENSSL_cleanse(p, s)
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#endif

#ifndef OPENSSL_NO_SECURE_MEMORY
static size_t secure_mem_used;

static int secure_mem_initialized;

static CRYPTO_RWLOCK *sec_malloc_lock = NULL;

/*
 * These are the functions that must be implemented by a secure heap (sh).
 */
static int sh_init(size_t size, size_t minsize);
static void *sh_malloc(size_t size);
static void sh_free(void *ptr);
static void sh_done(void);
static size_t sh_actual_size(char *ptr);
static int sh_allocated(const char *ptr);
#endif
int CRYPTO_secure_malloc_init(size_t size, size_t minsize)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    int ret = 0;

    if (!secure_mem_initialized) {
        sec_malloc_lock = CRYPTO_THREAD_lock_new();
        if (sec_malloc_lock == NULL)
            return 0;
        if ((ret = sh_init(size, minsize)) != 0) {
            secure_mem_initialized = 1;
        } else {
            CRYPTO_THREAD_lock_free(sec_malloc_lock);
            sec_malloc_lock = NULL;
        }
    }

    return ret;
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

int CRYPTO_secure_malloc_done(void)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    if (secure_mem_used == 0) {
        sh_done();
        secure_mem_initialized = 0;
        CRYPTO_THREAD_lock_free(sec_malloc_lock);
        sec_malloc_lock = NULL;
        return 1;
    }
#endif /* OPENSSL_NO_SECURE_MEMORY */
    return 0;
}

int CRYPTO_secure_malloc_initialized(void)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    return secure_mem_initialized;
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    void *ret;
    size_t actual_size;

    if (!secure_mem_initialized) {
        return CRYPTO_malloc(num, file, line);
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_malloc(num);
    actual_size = ret ? sh_actual_size(ret) : 0;
    secure_mem_used += actual_size;
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return CRYPTO_malloc(num, file, line);
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    if (secure_mem_initialized)
        /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
        return CRYPTO_secure_malloc(num, file, line);
#endif
    return CRYPTO_zalloc(num, file, line);
}

void CRYPTO_secure_free(void *ptr, const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    CRYPTO_free(ptr, file, line);
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

void CRYPTO_secure_clear_free(void *ptr, size_t num,
                              const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        OPENSSL_cleanse(ptr, num);
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    if (ptr == NULL)
        return;
    OPENSSL_cleanse(ptr, num);
    CRYPTO_free(ptr, file, line);
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

int CRYPTO_secure_allocated(const void *ptr)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    int ret;

    if (!secure_mem_initialized)
        return 0;
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_allocated(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

size_t CRYPTO_secure_used(void)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    return secure_mem_used;
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

size_t CRYPTO_secure_actual_size(void *ptr)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    size_t actual_size;

    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return actual_size;
#else
    return 0;
#endif
}
/* END OF PAGE ...

   ... START OF PAGE */

/*
 * SECURE HEAP IMPLEMENTATION
 */
#ifndef OPENSSL_NO_SECURE_MEMORY

/*
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator. We keep areas in a multiple
 * of "sh.minsize" units. The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes. The numbers 3 and 7 are all over the
 * place.
 */
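/*
 * Worked example (illustrative numbers, not required by the code): with an
 * arena_size of 4096 bytes and a minsize of 16 bytes there are 256 minimum
 * units, so bittable_size is 512 bits and freelist_size is 9 (lists 0..8,
 * where list 0 is the whole arena and list 8 holds 16-byte chunks). A chunk
 * on a given list spans (arena_size >> list) bytes and occupies bit
 *
 *     bit = (1 << list) + (ptr - arena) / (arena_size >> list)
 *
 * in the bit tables; its buddy is simply bit ^ 1, which is how
 * sh_find_my_buddy() below locates the neighbouring block when coalescing.
 */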
#define ONE ((size_t)1)

# define TESTBIT(t, b)  (t[(b) >> 3] & (ONE << ((b) & 7)))
# define SETBIT(t, b)   (t[(b) >> 3] |= (ONE << ((b) & 7)))
# define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))

#define WITHIN_ARENA(p) \
    ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
#define WITHIN_FREELIST(p) \
    ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])

typedef struct sh_list_st
{
    struct sh_list_st *next;
    struct sh_list_st **p_next;
} SH_LIST;

typedef struct sh_st
{
    char* map_result;
    size_t map_size;
    char *arena;
    size_t arena_size;
    char **freelist;
    ossl_ssize_t freelist_size;
    size_t minsize;
    unsigned char *bittable;
    unsigned char *bitmalloc;
    size_t bittable_size; /* size in bits */
} SH;

static SH sh;

static size_t sh_getlist(char *ptr)
{
    ossl_ssize_t list = sh.freelist_size - 1;
    size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;

    for (; bit; bit >>= 1, list--) {
        if (TESTBIT(sh.bittable, bit))
            break;
        OPENSSL_assert((bit & 1) == 0);
    }

    return list;
}

static int sh_testbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    return TESTBIT(table, bit);
}

static void sh_clearbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(TESTBIT(table, bit));
    CLEARBIT(table, bit);
}

static void sh_setbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(!TESTBIT(table, bit));
    SETBIT(table, bit);
}

static void sh_add_to_list(char **list, char *ptr)
{
    SH_LIST *temp;

    OPENSSL_assert(WITHIN_FREELIST(list));
    OPENSSL_assert(WITHIN_ARENA(ptr));

    temp = (SH_LIST *)ptr;
    temp->next = *(SH_LIST **)list;
    OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
    temp->p_next = (SH_LIST **)list;

    if (temp->next != NULL) {
        OPENSSL_assert((char **)temp->next->p_next == list);
        temp->next->p_next = &(temp->next);
    }

    *list = ptr;
}

static void sh_remove_from_list(char *ptr)
{
    SH_LIST *temp, *temp2;

    temp = (SH_LIST *)ptr;
    if (temp->next != NULL)
        temp->next->p_next = temp->p_next;
    *temp->p_next = temp->next;
    if (temp->next == NULL)
        return;

    temp2 = temp->next;
    OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
}

static int sh_init(size_t size, size_t minsize)
{
    int ret;
    size_t i;
    size_t pgsize;
    size_t aligned;

    memset(&sh, 0, sizeof(sh));

    /* make sure size and minsize are powers of 2 */
    OPENSSL_assert(size > 0);
    OPENSSL_assert((size & (size - 1)) == 0);
    OPENSSL_assert((minsize & (minsize - 1)) == 0);
    if (size <= 0 || (size & (size - 1)) != 0)
        goto err;
    if (minsize == 0 || (minsize & (minsize - 1)) != 0)
        goto err;

    while (minsize < (int)sizeof(SH_LIST))
        minsize *= 2;

    sh.arena_size = size;
    sh.minsize = minsize;
    sh.bittable_size = (sh.arena_size / sh.minsize) * 2;

    /* Prevent allocations of size 0 later on */
    if (sh.bittable_size >> 3 == 0)
        goto err;

    sh.freelist_size = -1;
    for (i = sh.bittable_size; i; i >>= 1)
        sh.freelist_size++;

    sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof(char *));
    OPENSSL_assert(sh.freelist != NULL);
    if (sh.freelist == NULL)
        goto err;

    sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bittable != NULL);
    if (sh.bittable == NULL)
        goto err;

    sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bitmalloc != NULL);
    if (sh.bitmalloc == NULL)
        goto err;

    /* Allocate space for heap, and two extra pages as guards */
#if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
    {
# if defined(_SC_PAGE_SIZE)
        long tmppgsize = sysconf(_SC_PAGE_SIZE);
# else
        long tmppgsize = sysconf(_SC_PAGESIZE);
# endif
        if (tmppgsize < 1)
            pgsize = PAGE_SIZE;
        else
            pgsize = (size_t)tmppgsize;
    }
#else
    pgsize = PAGE_SIZE;
#endif
    sh.map_size = pgsize + sh.arena_size + pgsize;
    if (1) {
#ifdef MAP_ANON
        sh.map_result = mmap(NULL, sh.map_size,
                             PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    } else {
#endif
        int fd;

        sh.map_result = MAP_FAILED;
        if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
            sh.map_result = mmap(NULL, sh.map_size,
                                 PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
            close(fd);
        }
    }
    if (sh.map_result == MAP_FAILED)
        goto err;
    sh.arena = (char *)(sh.map_result + pgsize);
    sh_setbit(sh.arena, 0, sh.bittable);
    sh_add_to_list(&sh.freelist[0], sh.arena);

    /* Now try to add guard pages and lock into memory. */
    ret = 1;

    /* Starting guard is already aligned from mmap. */
    if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
        ret = 2;

    /* Ending guard page - need to round up to page boundary */
    aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
    if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
        ret = 2;

#if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
    if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
        if (errno == ENOSYS) {
            if (mlock(sh.arena, sh.arena_size) < 0)
                ret = 2;
        } else {
            ret = 2;
        }
    }
#else
    if (mlock(sh.arena, sh.arena_size) < 0)
        ret = 2;
#endif
#ifdef MADV_DONTDUMP
    if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
        ret = 2;
#endif

    return ret;

 err:
    sh_done();
    return 0;
}

static void sh_done(void)
{
    OPENSSL_free(sh.freelist);
    OPENSSL_free(sh.bittable);
    OPENSSL_free(sh.bitmalloc);
    if (sh.map_result != NULL && sh.map_size)
        munmap(sh.map_result, sh.map_size);
    memset(&sh, 0, sizeof(sh));
}

static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}

static char *sh_find_my_buddy(char *ptr, int list)
{
    size_t bit;
    char *chunk = NULL;

    bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
    bit ^= 1;

    if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
        chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));

    return chunk;
}

static void *sh_malloc(size_t size)
{
    ossl_ssize_t list, slist;
    size_t i;
    char *chunk;

    if (size > sh.arena_size)
        return NULL;

    list = sh.freelist_size - 1;
    for (i = sh.minsize; i < size; i <<= 1)
        list--;
    if (list < 0)
        return NULL;

    /* try to find a larger entry to split */
    for (slist = list; slist >= 0; slist--)
        if (sh.freelist[slist] != NULL)
            break;
    if (slist < 0)
        return NULL;

    /* split larger entry */
    while (slist != list) {
        char *temp = sh.freelist[slist];

        /* remove from bigger list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_clearbit(temp, slist, sh.bittable);
        sh_remove_from_list(temp);
        OPENSSL_assert(temp != sh.freelist[slist]);

        /* done with bigger list */
        slist++;

        /* add to smaller list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        /* split in 2 */
        temp += sh.arena_size >> slist;
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        OPENSSL_assert(temp-(sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
    }

    /* peel off memory to hand back */
    chunk = sh.freelist[list];
    OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
    sh_setbit(chunk, list, sh.bitmalloc);
    sh_remove_from_list(chunk);

    OPENSSL_assert(WITHIN_ARENA(chunk));

    /* zero the free list header as a precaution against information leakage */
    memset(chunk, 0, sizeof(SH_LIST));

    return chunk;
}

static void sh_free(void *ptr)
{
    size_t list;
    void *buddy;

    if (ptr == NULL)
        return;
    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return;

    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    sh_clearbit(ptr, list, sh.bitmalloc);
    sh_add_to_list(&sh.freelist[list], ptr);

    /* Try to coalesce two adjacent free areas. */
    while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
        OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
        OPENSSL_assert(ptr != NULL);
        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_clearbit(ptr, list, sh.bittable);
        sh_remove_from_list(ptr);
        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_clearbit(buddy, list, sh.bittable);
        sh_remove_from_list(buddy);
        list--;

        /* Zero the higher addressed block's free list pointers */
        memset(ptr > buddy ? ptr : buddy, 0, sizeof(SH_LIST));
        if (ptr > buddy)
            ptr = buddy;

        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_setbit(ptr, list, sh.bittable);
        sh_add_to_list(&sh.freelist[list], ptr);
        OPENSSL_assert(sh.freelist[list] == ptr);
    }
}

static size_t sh_actual_size(char *ptr)
{
    int list;

    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return 0;
    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    return sh.arena_size / (ONE << list);
}
#endif /* OPENSSL_NO_SECURE_MEMORY */