#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */

#if defined(MHD_USE_PAGESIZE_MACRO) || defined(MHD_USE_PAGE_SIZE_MACRO)
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h> /* may provide the PAGESIZE / PAGE_SIZE macros */
#endif /* HAVE_SYS_PARAM_H */
#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */
/**
 * Fallback page size used when the real page size cannot be detected.
 */
#define _MHD_FALLBACK_PAGE_SIZE (4096)

#if defined(MHD_USE_PAGESIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGESIZE
#elif defined(MHD_USE_PAGE_SIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
#else
#define MHD_DEF_PAGE_SIZE_ _MHD_FALLBACK_PAGE_SIZE
#endif /* MHD_DEF_PAGE_SIZE_ */

#ifdef MHD_ASAN_POISON_ACTIVE
#include <sanitizer/asan_interface.h>
#endif /* MHD_ASAN_POISON_ACTIVE */
/* Define MAP_ANONYMOUS for systems that only provide MAP_ANON (e.g. macOS). */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif /* MAP_ANON && ! MAP_ANONYMOUS */
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif /* ! MAP_FAILED */
/**
 * Align to twice the word size (as GNU libc does).
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round up 'n' to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) * (ALIGN_SIZE))
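/* Illustrative values (not a comment from the original source), assuming a
 * 64-bit target where ALIGN_SIZE is 2 * 8 = 16 bytes:
 *   ROUND_TO_ALIGN (1)  -> 16
 *   ROUND_TO_ALIGN (16) -> 16
 *   ROUND_TO_ALIGN (17) -> 32
 * The integer division truncates, so adding ALIGN_SIZE - 1 first rounds up. */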
#ifndef MHD_ASAN_POISON_ACTIVE
/* Without ASAN user-poisoning the helpers below reduce to no-ops and
 * plain pointer arithmetic. */
#define _MHD_NOSANITIZE_PTRS
#define _MHD_RED_ZONE_SIZE (0)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) ROUND_TO_ALIGN(n)
#define _MHD_POISON_MEMORY(pointer, size) (void)0
#define _MHD_UNPOISON_MEMORY(pointer, size) (void)0
/**
 * Boolean 'true' if the first pointer is less than or equal to the second.
 */
#define mp_ptr_le_(p1,p2) \
  (((const uint8_t*)(p1)) <= ((const uint8_t*)(p2)))
/**
 * The difference in bytes between the first and the second pointer.
 */
#define mp_ptr_diff_(p1,p2) \
  ((size_t)(((const uint8_t*)(p1)) - ((const uint8_t*)(p2))))
#else  /* MHD_ASAN_POISON_ACTIVE */
/* With ASAN user-poisoning active, every allocation is followed by a
 * poisoned "red zone" of ALIGN_SIZE bytes. */
#define _MHD_RED_ZONE_SIZE (ALIGN_SIZE)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) (ROUND_TO_ALIGN(n) + _MHD_RED_ZONE_SIZE)
#define _MHD_POISON_MEMORY(pointer, size) \
  ASAN_POISON_MEMORY_REGION ((pointer), (size))
#define _MHD_UNPOISON_MEMORY(pointer, size) \
  ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
#if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
/**
 * Boolean 'true' if the first pointer is less than or equal to the second.
 */
#define mp_ptr_le_(p1,p2) \
  (((uintptr_t)((const void*)(p1))) <= ((uintptr_t)((const void*)(p2))))
/**
 * The difference in bytes between the first and the second pointer.
 */
#define mp_ptr_diff_(p1,p2) \
  ((size_t)(((uintptr_t)((const uint8_t*)(p1))) - \
            ((uintptr_t)((const uint8_t*)(p2)))))
#elif defined(FUNC_ATTR_PTRCOMPARE_WORKS) && \
  defined(FUNC_ATTR_PTRSUBTRACT_WORKS)
/**
 * Boolean 'true' if the first pointer is less than or equal to the second.
 */
__attribute__((no_sanitize ("pointer-compare"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}
/**
 * The difference in bytes between the first and the second pointer.
 */
__attribute__((no_sanitize ("pointer-subtract"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}
#elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
/**
 * Boolean 'true' if the first pointer is less than or equal to the second.
 */
__attribute__((no_sanitize ("address"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}

/**
 * The difference in bytes between the first and the second pointer.
 */
__attribute__((no_sanitize ("address"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}
#else  /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#error User-poisoning cannot be used
#endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#endif /* MHD_ASAN_POISON_ACTIVE */
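/* Illustrative effect of the poisoning setup (not a comment from the original
 * source), assuming ALIGN_SIZE is 16 so _MHD_RED_ZONE_SIZE is 16 when ASAN
 * user-poisoning is active:
 *   ROUND_TO_ALIGN_PLUS_RED_ZONE (10) -> 16 + 16 = 32
 * i.e. each allocation reserves one extra aligned chunk that stays poisoned,
 * so reads or writes past the requested size trip AddressSanitizer. */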
/**
 * The size of a memory page on this system.
 */
static size_t MHD_sys_page_size_ =
#if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
  PAGESIZE;
#elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
  PAGE_SIZE;
#else
  _MHD_FALLBACK_PAGE_SIZE; /* default fallback value */
#endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
/**
 * Initialise run-time values for the memory pools.
 */
void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#else
  MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#endif /* MHD_SC_PAGESIZE */
}
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;
  size_t alloc_size;

  alloc_size = 0;
  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
    pool->memory = MAP_FAILED; /* small pool: just use malloc() below */
  else
  {
    /* Round up the allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    pool->memory = mmap (NULL,
                         alloc_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 alloc_size,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif /* _WIN32 */
  }
#else  /* ! MAP_ANONYMOUS && ! _WIN32 */
  pool->memory = MAP_FAILED;
#endif /* ! MAP_ANONYMOUS && ! _WIN32 */
  if ( (MAP_FAILED == pool->memory) ||
       (NULL == pool->memory) )
  { /* Fall back to a plain heap allocation */
    alloc_size = ROUND_TO_ALIGN (max);
    pool->memory = malloc (alloc_size);
    if (NULL == pool->memory)
    {
      free (pool);
      return NULL;
    }
    pool->is_mmap = false;
  }
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  else
    pool->is_mmap = true;
#endif /* MAP_ANONYMOUS || _WIN32 */
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  _MHD_POISON_MEMORY (pool->memory, pool->size);
  return pool;
}
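/* Layout sketch (inferred from the code in this file, not a comment from the
 * original source):
 *
 *   memory[0 .. pos)     blocks allocated "from the front"
 *   memory[pos .. end)   free space
 *   memory[end .. size)  blocks allocated "from the end"
 *
 * so 'end - pos' is the remaining free space, and pos <= end <= size is the
 * invariant checked by the asserts throughout the functions below. */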
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  if (! pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    munmap (pool->memory, pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory, 0, MEM_RELEASE);
#else
    abort (); /* should be unreachable */
#endif
  free (pool);
}
size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
#ifdef MHD_ASAN_POISON_ACTIVE
  if ((pool->end - pool->pos) <= _MHD_RED_ZONE_SIZE)
    return 0;
  return (pool->end - pool->pos) - _MHD_RED_ZONE_SIZE;
#else  /* ! MHD_ASAN_POISON_ACTIVE */
  return (pool->end - pool->pos);
#endif /* ! MHD_ASAN_POISON_ACTIVE */
}
void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   int from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* 'size' is too close to SIZE_MAX, rounding wrapped */
  if (asize > pool->end - pool->pos)
    return NULL; /* not enough free space left in the pool */
  if (from_end)
  {
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}
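/* Example (hypothetical numbers, not from the original source): with a fresh
 * 1024-byte pool and no ASAN red zones, MHD_pool_allocate (pool, 10, 0)
 * returns &memory[0] and advances 'pos' to 16 (the rounded size), while
 * MHD_pool_allocate (pool, 10, 1) returns &memory[1008] and lowers 'end'
 * from 1024 to 1008; the free middle region shrinks from both sides. */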
bool
MHD_pool_is_resizable_inplace (struct MemoryPool *pool,
                               void *block,
                               size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  if (NULL != block)
  {
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (pool->size >= block_offset + block_size);
    /* Only the most recent "front" allocation can be grown in-place. */
    return (pool->pos ==
            ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
  }
  return false; /* a NULL block cannot be resized in-place */
}
void *
MHD_pool_try_alloc (struct MemoryPool *pool,
                    size_t size,
                    size_t *required_bytes)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
  { /* 'size' is too close to SIZE_MAX, rounding wrapped */
    *required_bytes = SIZE_MAX;
    return NULL;
  }
  if (asize > pool->end - pool->pos)
  { /* Not enough space: report how much more would be needed */
    if (asize <= pool->end)
      *required_bytes = asize - (pool->end - pool->pos);
    else
      *required_bytes = SIZE_MAX;
    return NULL;
  }
  *required_bytes = 0;
  ret = &pool->memory[pool->end - asize];
  pool->end -= asize;
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}
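/* Example (hypothetical numbers, not from the original source): if only 32
 * bytes are free between 'pos' and 'end', MHD_pool_try_alloc (pool, 100,
 * &required) returns NULL and sets 'required' to
 * ROUND_TO_ALIGN_PLUS_RED_ZONE (100) - 32, i.e. how many additional free
 * bytes the pool would need for the request to succeed. */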
void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert ((NULL != old) || (0 == old_size));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (old, old_size));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
  if (NULL != old)
  { /* Have previously allocated data */
    const size_t old_offset = mp_ptr_diff_ (old, pool->memory);
    const bool shrinking = (old_size > new_size);
    /* Blocks allocated "from the end" must not be reallocated */
    mhd_assert ((0 == old_size) ||
                (pool->pos > old_offset));
    if (shrinking)
    { /* Shrinking in-place: zero-out and poison the freed tail */
      memset ((uint8_t *) old + new_size, 0, old_size - new_size);
      _MHD_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
    }
    if (pool->pos == ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
    { /* 'old' is the last block allocated from the front */
      const size_t new_apos =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
      if (! shrinking)
      { /* Growing in-place: check for enough free space */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* value wrap */
          return NULL; /* no space */
      }
      pool->pos = new_apos; /* resized in-place */
      _MHD_UNPOISON_MEMORY (old, new_size);
      return old;
    }
    if (shrinking)
      return old; /* shrunk in-place; the freed tail stays reserved */
  }
  /* Allocate a new block from the front and move the data */
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* value wrap on a huge 'new_size' */
       (asize > pool->end - pool->pos) )
    return NULL; /* no space */
  new_blc = pool->memory + pool->pos;
  pool->pos += asize;
  _MHD_UNPOISON_MEMORY (new_blc, new_size);
  if (0 != old_size)
  { /* Copy the data, then zero-out and poison the old block */
    memcpy (new_blc, old, old_size);
    memset (old, 0, old_size);
    _MHD_POISON_MEMORY (old, old_size);
  }
  return new_blc;
}
void
MHD_pool_deallocate (struct MemoryPool *pool,
                     void *block,
                     size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert ((NULL != block) || (0 == block_size));
  if (NULL != block)
  { /* Have previously allocated data */
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (block_offset <= pool->size);
    mhd_assert ((block_offset != pool->pos) || (block_size == 0));
    /* Zero-out the deallocated region */
    if (0 != block_size)
    {
      memset (block, 0, block_size);
      _MHD_POISON_MEMORY (block, block_size);
    }
#if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
    else
      return; /* zero size, nothing to do */
#endif /* ! MHD_FAVOR_SMALL_CODE && ! MHD_ASAN_POISON_ACTIVE */
    if (block_offset <= pool->pos)
    { /* Block was allocated "from the front" */
      const size_t alg_end =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
      mhd_assert (alg_end <= pool->pos);
      if (alg_end == pool->pos)
      { /* The last front block: give the space back to the pool */
        size_t alg_start = ROUND_TO_ALIGN (block_offset);
        mhd_assert (alg_start >= block_offset);
#if defined(MHD_ASAN_POISON_ACTIVE)
        if (alg_start != block_offset)
        { /* Keep the unaligned prefix poisoned */
          _MHD_POISON_MEMORY (pool->memory + block_offset,
                              alg_start - block_offset);
        }
        else if (0 != alg_start)
        {
          bool need_red_zone_before;
#if defined(HAVE___ASAN_REGION_IS_POISONED)
          need_red_zone_before =
            (NULL == __asan_region_is_poisoned (pool->memory
                                                + alg_start
                                                - _MHD_RED_ZONE_SIZE,
                                                _MHD_RED_ZONE_SIZE));
#elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
          need_red_zone_before =
            (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
#else  /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          need_red_zone_before = true; /* unknown, assume a red zone is needed */
#endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          if (need_red_zone_before)
          { /* Keep a poisoned red zone before the previous block's data */
            _MHD_POISON_MEMORY (pool->memory + alg_start, _MHD_RED_ZONE_SIZE);
            alg_start += _MHD_RED_ZONE_SIZE;
          }
        }
#endif /* MHD_ASAN_POISON_ACTIVE */
        mhd_assert (alg_start <= pool->pos);
        pool->pos = alg_start;
      }
    }
    else
    { /* Block was allocated "from the end" */
      if (block_offset == pool->end)
      { /* The last end block: give the space back to the pool */
        const size_t alg_end =
          ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
        pool->end = alg_end;
      }
    }
  }
}
void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes <= new_size);
  mhd_assert (copy_bytes <= pool->size);
  mhd_assert ((NULL != keep) || (0 == copy_bytes));
  mhd_assert ((NULL == keep) ||
              (pool->size >= mp_ptr_diff_ (keep, pool->memory) + copy_bytes));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (keep, copy_bytes));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
  _MHD_UNPOISON_MEMORY (pool->memory, new_size);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  { /* Move the data to be kept to the start of the pool */
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  /* Technically not needed, but safer to zero-out the unused part */
  if (pool->size > copy_bytes)
  {
    size_t to_zero; /* size of memory to zero-out */
    to_zero = pool->size - copy_bytes;
    _MHD_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
#ifdef _WIN32
    if (pool->is_mmap)
    {
      size_t to_recommit; /* size of memory to de-commit and re-commit */
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;
      /* De-committing and re-committing clears the memory and keeps the
       * pages available to the system until they are touched again. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;
        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort (); /* serious error, must never happen */
      }
    }
#endif /* _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = ROUND_TO_ALIGN_PLUS_RED_ZONE (copy_bytes);
  pool->end = pool->size;
  /* Poison the unallocated part */
  _MHD_POISON_MEMORY (pool->memory + new_size,
                      pool->size - new_size);
  return pool->memory;
}
/*
 * Summary of the pool interface implemented in this file: a memory pool is
 * mostly used for efficient (de)allocation for each connection and for
 * bounding memory use for...
 *
 *   void                MHD_init_mem_pools_ (void);
 *   struct MemoryPool * MHD_pool_create (size_t max);
 *   void                MHD_pool_destroy (struct MemoryPool *pool);
 *   size_t              MHD_pool_get_free (struct MemoryPool *pool);
 *   void *              MHD_pool_allocate (struct MemoryPool *pool,
 *                                          size_t size, int from_end);
 *   void *              MHD_pool_try_alloc (struct MemoryPool *pool,
 *                                           size_t size,
 *                                           size_t *required_bytes);
 *   bool                MHD_pool_is_resizable_inplace (struct MemoryPool *pool,
 *                                                      void *block,
 *                                                      size_t block_size);
 *   void *              MHD_pool_reallocate (struct MemoryPool *pool,
 *                                            void *old, size_t old_size,
 *                                            size_t new_size);
 *   void                MHD_pool_deallocate (struct MemoryPool *pool,
 *                                            void *block, size_t block_size);
 *   void *              MHD_pool_reset (struct MemoryPool *pool, void *keep,
 *                                       size_t copy_bytes, size_t new_size);
 */
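/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * caller inside the library might drive the interface above.  The sizes are
 * made up and error handling is abbreviated; 'struct MemoryPool' stays
 * opaque to the caller.
 *
 *   struct MemoryPool *pool = MHD_pool_create (16 * 1024);
 *   if (NULL != pool)
 *   {
 *     char *hdr = MHD_pool_allocate (pool, 256, 0);    // from the front
 *     char *body = MHD_pool_allocate (pool, 1024, 1);  // from the end
 *     if ( (NULL != hdr) && (NULL != body) )
 *     {
 *       // ... use the buffers for one request ...
 *     }
 *     // keep the first 256 bytes, recycle the rest for the next request
 *     MHD_pool_reset (pool, hdr, 256, 4096);
 *     MHD_pool_destroy (pool); // releases the whole pool at once
 *   }
 */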