GNU libmicrohttpd  0.9.69
memorypool.c
/*
  This file is part of libmicrohttpd
  Copyright (C) 2007--2019 Daniel Pittman, Christian Grothoff and
  Karlson2k (Evgeny Grin)

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

/**
 * @file memorypool.c
 * @brief memory pool; mostly used for efficient (de)allocation
 *        for each connection and bounding memory use for each request
 * @author Christian Grothoff
 */
#include "memorypool.h"
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "mhd_assert.h"
#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef HAVE_SYSCONF
#include <unistd.h>
#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */
#endif /* HAVE_SYSCONF */

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif

/**
 * Align to 2x word size (as GNU libc does).
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round up a size to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) * (ALIGN_SIZE))

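/* Illustrative note (not part of the original source): with 64-bit pointers
 * ALIGN_SIZE is 16, so ROUND_TO_ALIGN(1) == 16, ROUND_TO_ALIGN(16) == 16 and
 * ROUND_TO_ALIGN(17) == 32, while ROUND_TO_ALIGN(0) stays 0.  Sizes within
 * ALIGN_SIZE - 1 of SIZE_MAX wrap around to a small value and round to 0,
 * which callers below detect by checking for a zero result with a non-zero
 * input. */
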
#if defined(PAGE_SIZE)
#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
#elif defined(PAGESIZE)
#define MHD_DEF_PAGE_SIZE_ PAGESIZE
#else /* ! PAGESIZE */
#define MHD_DEF_PAGE_SIZE_ (4096)
#endif /* ! PAGESIZE */

/**
 * Size of a memory page.
 */
static size_t MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_; /* Default fallback value */

/**
 * Initialise values for memory pools.
 */
void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#elif defined(_WIN32)
  SYSTEM_INFO si;
  GetSystemInfo (&si);
  MHD_sys_page_size_ = (size_t) si.dwPageSize;
#else
  MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#endif /* _WIN32 */
}


/**
 * Handle for a memory pool.
 * Pools are not reentrant and must not be used by multiple threads.
 */
struct MemoryPool
{

  /**
   * Pointer to the pool's memory block.
   */
  uint8_t *memory;

  /**
   * Total size of the pool's memory block.
   */
  size_t size;

  /**
   * Offset of the first free byte; normal allocations advance this offset.
   */
  size_t pos;

  /**
   * Offset just past the last free byte; allocations "from the end"
   * lower this offset.
   */
  size_t end;

  /**
   * 'true' if the pool's memory was obtained with mmap()/VirtualAlloc(),
   * 'false' if it was obtained with malloc().
   */
  bool is_mmap;
};

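/* Illustrative sketch (not part of the original source) of how the members
 * above track allocations: normal allocations grow 'pos' upwards from the
 * start of 'memory', "from the end" allocations move 'end' downwards, and
 * the bytes in [pos, end) are free:
 *
 *   0                      pos            end               size
 *   | normal allocations   | free space   | end allocations |
 */
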

/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 * @return NULL on error
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;
  size_t alloc_size;

  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
  {
    pool->memory = MAP_FAILED;
  }
  else
  {
    /* Round up allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    pool->memory = mmap (NULL,
                         alloc_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 alloc_size,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif /* _WIN32 */
  }
#else /* ! _WIN32 && ! MAP_ANONYMOUS */
  pool->memory = MAP_FAILED;
#endif /* ! _WIN32 && ! MAP_ANONYMOUS */
  if (MAP_FAILED == pool->memory)
  {
    alloc_size = ROUND_TO_ALIGN (max);
    pool->memory = malloc (alloc_size);
    if (NULL == pool->memory)
    {
      free (pool);
      return NULL;
    }
    pool->is_mmap = false;
  }
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  else
  {
    pool->is_mmap = true;
  }
#endif /* _WIN32 || MAP_ANONYMOUS */
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  return pool;
}

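/* Illustrative usage sketch (not part of the original source).  A pool is
 * created once (typically per connection), carved up with MHD_pool_allocate()
 * and released as a whole:
 *
 *   struct MemoryPool *pool = MHD_pool_create (64 * 1024);
 *   if (NULL != pool)
 *   {
 *     char *buf = MHD_pool_allocate (pool, 1024, false);
 *     if (NULL != buf)
 *     {
 *       ... use buf; there is no per-allocation free () ...
 *     }
 *     MHD_pool_destroy (pool);
 *   }
 */
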

/**
 * Destroy a memory pool.
 *
 * @param pool memory pool to destroy
 */
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  if (! pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    munmap (pool->memory,
            pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#else
    abort ();
#endif
  free (pool);
}


/**
 * Check how much memory is left in the pool.
 *
 * @param pool pool to check
 * @return number of bytes still available in the pool
 */
size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  return (pool->end - pool->pos);
}


/**
 * Allocate size bytes from the pool.
 *
 * @param pool memory pool to use for the operation
 * @param size number of bytes to allocate
 * @param from_end allocate from end of pool (set to 'true');
 *        use this for small, persistent allocations that
 *        will never be reallocated
 * @return NULL if the pool cannot support @a size more bytes
 */
void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   bool from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  asize = ROUND_TO_ALIGN (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if ( (pool->pos + asize > pool->end) ||
       (pool->pos + asize < pool->pos))
    return NULL;
  if (from_end)
  {
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  return ret;
}

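/* Illustrative sketch (not part of the original source) of the two allocation
 * directions.  Assuming an empty 1024-byte pool and ALIGN_SIZE == 16:
 *
 *   void *a = MHD_pool_allocate (pool, 100, false);   pos: 0 -> 112
 *   void *b = MHD_pool_allocate (pool, 50, true);     end: 1024 -> 960
 *
 * 'a' sits at offset 0 and may later be grown with MHD_pool_reallocate();
 * 'b' sits at offset 960 and must never be reallocated.
 */
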

/**
 * Reallocate a block of memory obtained from the pool.
 * This is particularly efficient when growing or shrinking the block
 * that was last (re)allocated.  If the given block is not the most
 * recently (re)allocated block, the memory of the previous allocation
 * may not be released until the pool is destroyed or reset.
 *
 * @param pool memory pool to use for the operation
 * @param old the existing block
 * @param old_size the size of the existing block
 * @param new_size the new size of the block
 * @return new address of the block, or
 *         NULL if the pool cannot support @a new_size bytes
 *         (@a old continues to be valid for @a old_size)
 */
void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (old != NULL || old_size == 0);
  mhd_assert (old == NULL || pool->memory <= (uint8_t*) old);
  mhd_assert (old == NULL || pool->memory + pool->size >= (uint8_t*) old
              + old_size);
  /* Blocks "from the end" must not be reallocated */
  mhd_assert (old == NULL || pool->memory + pool->pos > (uint8_t*) old);

  if (0 != old_size)
  { /* Need to save some data */
    const size_t old_offset = (uint8_t*) old - pool->memory;
    const bool shrinking = (old_size > new_size);
    /* Try resizing in-place */
    if (shrinking)
    { /* Shrinking in-place, zero-out freed part */
      memset ((uint8_t*) old + new_size, 0, old_size - new_size);
    }
    if (pool->pos == ROUND_TO_ALIGN (old_offset + old_size))
    { /* "old" block is the last allocated block */
      const size_t new_apos = ROUND_TO_ALIGN (old_offset + new_size);
      if (! shrinking)
      { /* Grow in-place, check for enough space. */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* Value wrap */
          return NULL; /* No space */
      }
      /* Resized in-place */
      pool->pos = new_apos;
      return old;
    }
    if (shrinking)
      return old; /* Resized in-place, freed part remains allocated */
  }
  /* Need to allocate new block */
  asize = ROUND_TO_ALIGN (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* Value wrap, too large new_size. */
       (asize > pool->end - pool->pos) ) /* Not enough space */
    return NULL;

  new_blc = pool->memory + pool->pos;
  pool->pos += asize;

  if (0 != old_size)
  {
    /* Move data to new block, old block remains allocated */
    memcpy (new_blc, old, old_size);
    /* Zero-out old block */
    memset (old, 0, old_size);
  }
  return new_blc;
}

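/* Illustrative sketch (not part of the original source): reallocation is
 * cheap only while the block is still the most recently allocated one.
 * With an empty pool and ALIGN_SIZE == 16:
 *
 *   char *p = MHD_pool_allocate (pool, 64, false);   pos becomes 64
 *   p = MHD_pool_reallocate (pool, p, 64, 128);      grown in-place, pos 128
 *   char *q = MHD_pool_allocate (pool, 16, false);   pos becomes 144
 *   p = MHD_pool_reallocate (pool, p, 128, 256);     'p' is no longer the last
 *                                                    block: its data is copied
 *                                                    to a new block and the old
 *                                                    128 bytes stay allocated
 *                                                    until reset/destroy.
 */
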

/**
 * Clear all entries from the memory pool except
 * for @a keep of the given @a copy_bytes.  The pool is
 * reset to @a new_size size.
 *
 * @param pool memory pool to use for the operation
 * @param keep pointer to the entry to keep (maybe NULL)
 * @param copy_bytes how many bytes need to be kept at this address
 * @param new_size how large should the pool be afterwards
 * @return new address of @a keep (if it had to change)
 */
void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes < new_size);
  mhd_assert (keep != NULL || copy_bytes == 0);
  mhd_assert (keep == NULL || pool->memory <= (uint8_t*) keep);
  mhd_assert (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep
              + copy_bytes);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  {
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  /* technically not needed, but safer to zero out */
  if (pool->size > copy_bytes)
  {
    size_t to_zero; /* Size of the memory area to be zeroed */

    to_zero = pool->size - copy_bytes;
#ifdef _WIN32
    if (pool->is_mmap)
    {
      size_t to_recommit;
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;

      /* De-committing and re-committing again clear memory and make
       * pages free / available for other needs until accessed. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;

        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort (); /* Serious error, must never happen */
      }
    }
#endif /* _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = ROUND_TO_ALIGN (new_size);
  pool->end = pool->size;
  return pool->memory;
}

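/* Illustrative sketch (not part of the original source): instead of destroying
 * the pool, a caller can reset it and carry a prefix of data over to the start
 * of the pool.  The names 'leftover', 'leftover_size' and 'pool_size' below
 * are hypothetical:
 *
 *   void *base = MHD_pool_reset (pool, leftover, leftover_size, pool_size);
 *
 * Afterwards 'leftover_size' bytes from 'leftover' live at 'base' (the start
 * of the pool's memory), the rest of the pool is zeroed, and 'pos'/'end' are
 * reset so the remainder can be reused.
 */
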

/* end of memorypool.c */