1 /* -*- Mode: C ; c-basic-offset: 2 -*- */
2 /*****************************************************************************
4 * Non-sleeping memory allocation
6 * Copyright (C) 2006,2007 Nedko Arnaudov <nedko@arnaudov.name>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *****************************************************************************/
28 #include "memory_atomic.h"
32 struct rtsafe_memory_pool
/* NOTE(review): this listing is elided -- the struct's opening brace, a
 * data_size member (referenced by the allocation code below), and the
 * closing brace are not visible in this chunk. */
35 size_t min_preallocated; /* keep at least this many chunks preallocated */
36 size_t max_preallocated; /* trim the free list down to this many chunks */
38 unsigned int used_count; /* chunks currently handed out to callers */
39 struct list_head unused; /* preallocated, currently-free chunks */
40 unsigned int unused_count; /* length of the unused list */
42 bool enforce_thread_safety;
43 /* next members are initialized/used only if enforce_thread_safety is true */
44 pthread_mutex_t mutex; /* guards pending and unused_count2 */
45 unsigned int unused_count2; /* snapshot of unused_count published to the non-realtime side */
46 struct list_head pending; /* chunks staged by/for the non-realtime (sleepy) side */
49 #define RTSAFE_GROUPS_PREALLOCATE 1024
/* Create a memory pool of fixed-size chunks and preallocate its free list.
 * min_preallocated/max_preallocated bound the free-list size; equal values
 * are accepted (see the assert below).  On success the new pool handle is
 * stored through pool_handle_ptr.
 * NOTE(review): the return-type line, malloc NULL check and error paths are
 * elided from this listing. */
52 rtsafe_memory_pool_create(
54 size_t min_preallocated,
55 size_t max_preallocated,
56 bool enforce_thread_safety,
57 rtsafe_memory_pool_handle * pool_handle_ptr)
60 struct rtsafe_memory_pool * pool_ptr;
62 assert(min_preallocated <= max_preallocated); /* equal bounds are allowed here */
64 pool_ptr = malloc(sizeof(struct rtsafe_memory_pool));
70 pool_ptr->data_size = data_size;
71 pool_ptr->min_preallocated = min_preallocated;
72 pool_ptr->max_preallocated = max_preallocated;
74 pool_ptr->used_count = 0;
76 INIT_LIST_HEAD(&pool_ptr->unused);
77 pool_ptr->unused_count = 0;
79 pool_ptr->enforce_thread_safety = enforce_thread_safety;
80 if (enforce_thread_safety)
82 ret = pthread_mutex_init(&pool_ptr->mutex, NULL);
89 INIT_LIST_HEAD(&pool_ptr->pending);
90 pool_ptr->unused_count2 = 0;
/* prime the free list before handing the pool out */
93 rtsafe_memory_pool_sleepy((rtsafe_memory_pool_handle)pool_ptr);
94 *pool_handle_ptr = pool_ptr;
99 #define pool_ptr ((struct rtsafe_memory_pool *)pool_handle)
/* Free every chunk on the unused (and, if thread-safe, pending) list and
 * release the pool.  All chunks must have been deallocated by the caller.
 * NOTE(review): the free() calls and closing braces are elided here. */
102 rtsafe_memory_pool_destroy(
103 rtsafe_memory_pool_handle pool_handle)
106 struct list_head * node_ptr;
108 /* caller should deallocate all chunks prior to releasing the pool itself */
109 assert(pool_ptr->used_count == 0);
111 while (pool_ptr->unused_count != 0)
113 assert(!list_empty(&pool_ptr->unused));
115 node_ptr = pool_ptr->unused.next;
118 pool_ptr->unused_count--;
123 assert(list_empty(&pool_ptr->unused));
125 if (pool_ptr->enforce_thread_safety)
127 while (!list_empty(&pool_ptr->pending))
129 node_ptr = pool_ptr->pending.next;
136 ret = pthread_mutex_destroy(&pool_ptr->mutex);
143 /* adjust unused list size */
/* Non-realtime side: grow the staging/unused list up to min_preallocated
 * with malloc, and trim it back down when it exceeds max_preallocated.
 * In thread-safe mode work is done on the pending list under the mutex;
 * otherwise the unused list is touched directly.
 * NOTE(review): the count declaration, list_del/free calls and braces are
 * elided from this listing. */
145 rtsafe_memory_pool_sleepy(
146 rtsafe_memory_pool_handle pool_handle)
148 struct list_head * node_ptr;
151 if (pool_ptr->enforce_thread_safety)
153 pthread_mutex_lock(&pool_ptr->mutex);
155 count = pool_ptr->unused_count2;
/* BUGFIX: was a strict '<', which contradicts rtsafe_memory_pool_create()
 * (it asserts min_preallocated <= max_preallocated) and would fire for a
 * pool created with equal bounds. */
157 assert(pool_ptr->min_preallocated <= pool_ptr->max_preallocated);
159 while (count < pool_ptr->min_preallocated)
161 node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
162 if (node_ptr == NULL)
167 list_add_tail(node_ptr, &pool_ptr->pending);
172 while (count > pool_ptr->max_preallocated && !list_empty(&pool_ptr->pending))
174 node_ptr = pool_ptr->pending.next;
183 pthread_mutex_unlock(&pool_ptr->mutex);
/* non-thread-safe mode: adjust the unused list directly */
187 while (pool_ptr->unused_count < pool_ptr->min_preallocated)
189 node_ptr = malloc(sizeof(struct list_head) + pool_ptr->data_size);
190 if (node_ptr == NULL)
195 list_add_tail(node_ptr, &pool_ptr->unused);
196 pool_ptr->unused_count++;
199 while (pool_ptr->unused_count > pool_ptr->max_preallocated)
201 assert(!list_empty(&pool_ptr->unused));
203 node_ptr = pool_ptr->unused.next;
206 pool_ptr->unused_count--;
213 /* find entry in unused list, fail if it is empty */
/* Realtime-safe O(1) allocation: pop a chunk from the unused list; returns
 * the usable area just past the embedded list node.  Returns NULL when the
 * pool is exhausted (NULL-return line elided in this listing).  If the
 * mutex can be taken without blocking, opportunistically move staged
 * chunks from pending to unused and publish the new count. */
215 rtsafe_memory_pool_allocate(
216 rtsafe_memory_pool_handle pool_handle)
218 struct list_head * node_ptr;
/* BUGFIX: use a separate cursor for the refill loop; the original reused
 * node_ptr, so after any refill iteration the function returned a chunk
 * that had just been re-added to the unused list (double use). */
struct list_head * refill_ptr;
220 if (list_empty(&pool_ptr->unused))
225 node_ptr = pool_ptr->unused.next;
227 pool_ptr->unused_count--;
228 pool_ptr->used_count++;
230 if (pool_ptr->enforce_thread_safety &&
231 pthread_mutex_trylock(&pool_ptr->mutex) == 0) /* trylock: never block the RT thread */
233 while (pool_ptr->unused_count < pool_ptr->min_preallocated && !list_empty(&pool_ptr->pending))
235 refill_ptr = pool_ptr->pending.next;
238 list_add_tail(refill_ptr, &pool_ptr->unused);
239 pool_ptr->unused_count++;
242 pool_ptr->unused_count2 = pool_ptr->unused_count;
244 pthread_mutex_unlock(&pool_ptr->mutex);
/* user data lives immediately after the list node header */
247 return (node_ptr + 1);
250 /* move from used to unused list */
/* Realtime-safe O(1) deallocation: the chunk's list node sits immediately
 * before the user data pointer.  If the mutex can be taken without
 * blocking, trim excess chunks onto the pending list for the non-realtime
 * side to free, and publish the new count. */
252 rtsafe_memory_pool_deallocate(
253 rtsafe_memory_pool_handle pool_handle,
256 struct list_head * node_ptr;
/* recover the embedded list node in front of the user data */
258 list_add_tail((struct list_head *)data - 1, &pool_ptr->unused);
259 pool_ptr->used_count--;
260 pool_ptr->unused_count++;
262 if (pool_ptr->enforce_thread_safety &&
263 pthread_mutex_trylock(&pool_ptr->mutex) == 0) /* trylock: never block the RT thread */
265 while (pool_ptr->unused_count > pool_ptr->max_preallocated)
267 assert(!list_empty(&pool_ptr->unused));
269 node_ptr = pool_ptr->unused.next;
272 list_add_tail(node_ptr, &pool_ptr->pending); /* freed later by the sleepy side */
273 pool_ptr->unused_count--;
276 pool_ptr->unused_count2 = pool_ptr->unused_count;
278 pthread_mutex_unlock(&pool_ptr->mutex);
/* Blocking allocation for non-realtime callers: keep replenishing the pool
 * and retrying until a chunk is obtained.
 * NOTE(review): this is a do { ... } while retry loop -- the 'do', the data
 * declaration and the return are elided from this listing. */
283 rtsafe_memory_pool_allocate_sleepy(
284 rtsafe_memory_pool_handle pool_handle)
290 rtsafe_memory_pool_sleepy(pool_handle);
291 data = rtsafe_memory_pool_allocate(pool_handle);
293 while (data == NULL);
298 /* max alloc is DATA_MIN * (2 ^ POOLS_COUNT) - DATA_SUB */
299 #define DATA_MIN 1024
300 #define DATA_SUB 100 /* alloc slightly smaller chunks in the hope of not allocating an additional page for control data */
/* One size class of the generic allocator: a pool plus its chunk size
 * (size member elided in this listing). */
302 struct rtsafe_memory_pool_generic
305 rtsafe_memory_pool_handle pool;
/* NOTE(review): the following member belongs to struct rtsafe_memory,
 * whose declaration header is elided from this listing. */
310 struct rtsafe_memory_pool_generic * pools; /* array of pools_count size classes */
/* NOTE(review): the function name/return type and leading parameters of
 * rtsafe_memory_init() are elided; visible code builds a ladder of pools
 * with sizes DATA_MIN*2^i - DATA_SUB until max_size fits. */
319 bool enforce_thread_safety,
320 rtsafe_memory_handle * handle_ptr)
324 struct rtsafe_memory * memory_ptr;
326 LOG_DEBUG("rtsafe_memory_init() called.");
328 memory_ptr = malloc(sizeof(struct rtsafe_memory));
329 if (memory_ptr == NULL)
335 memory_ptr->pools_count = 1;
/* find the smallest pool count whose top size class covers max_size */
337 while ((size << memory_ptr->pools_count) < max_size + DATA_SUB)
339 memory_ptr->pools_count++;
/* shifting size by >= bit-width of size_t would be undefined behavior */
341 if (memory_ptr->pools_count > sizeof(size_t) * 8)
343 assert(0); /* chances that the caller really needs such a huge size are close to zero */
348 memory_ptr->pools = malloc(memory_ptr->pools_count * sizeof(struct rtsafe_memory_pool_generic));
349 if (memory_ptr->pools == NULL)
356 for (i = 0 ; i < memory_ptr->pools_count ; i++)
358 memory_ptr->pools[i].size = size - DATA_SUB;
360 if (!rtsafe_memory_pool_create(
361 memory_ptr->pools[i].size,
364 enforce_thread_safety,
365 &memory_ptr->pools[i].pool))
/* unwind pools created so far (loop header elided) */
370 rtsafe_memory_pool_destroy(memory_ptr->pools[i].pool);
373 goto fail_free_pools;
379 *handle_ptr = (rtsafe_memory_handle)memory_ptr;
384 free(memory_ptr->pools);
393 #define memory_ptr ((struct rtsafe_memory *)handle_ptr)
/* Destroy every size-class pool and release the descriptor.
 * NOTE(review): the free(memory_ptr) and closing brace are elided. */
395 rtsafe_memory_uninit(
396 rtsafe_memory_handle handle_ptr)
400 LOG_DEBUG("rtsafe_memory_uninit() called.");
402 for (i = 0 ; i < memory_ptr->pools_count ; i++)
404 LOG_DEBUG("Destroying pool for size %u", (unsigned int)memory_ptr->pools[i].size);
405 rtsafe_memory_pool_destroy(memory_ptr->pools[i].pool);
408 free(memory_ptr->pools);
/* Realtime-safe variable-size allocation: pick the smallest size class
 * that fits size plus the hidden pool-handle prefix, allocate from that
 * pool and stash the pool handle just before the returned pointer so
 * rtsafe_memory_deallocate() can find it.  Returns NULL on pool
 * exhaustion or oversize requests (return lines elided). */
414 rtsafe_memory_allocate(
415 rtsafe_memory_handle handle_ptr,
418 rtsafe_memory_pool_handle * data_ptr;
421 LOG_DEBUG("rtsafe_memory_allocate() called.");
423 /* pool handle is stored just before user data to ease deallocation */
424 size += sizeof(rtsafe_memory_pool_handle);
/* size classes are sorted ascending, so the first fit is the tightest */
426 for (i = 0 ; i < memory_ptr->pools_count ; i++)
428 if (size <= memory_ptr->pools[i].size)
430 LOG_DEBUG("Using chunk with size %u.", (unsigned int)memory_ptr->pools[i].size);
431 data_ptr = rtsafe_memory_pool_allocate(memory_ptr->pools[i].pool);
432 if (data_ptr == NULL)
434 LOG_DEBUG("rtsafe_memory_pool_allocate() failed.");
438 *data_ptr = memory_ptr->pools[i].pool;
440 LOG_DEBUG("rtsafe_memory_allocate() returning %p", (data_ptr + 1));
441 return (data_ptr + 1);
445 /* data size too big, increase POOLS_COUNT */
446 LOG_WARNING("Data size is too big");
/* Non-realtime maintenance: replenish/trim every size-class pool. */
451 rtsafe_memory_sleepy(
452 rtsafe_memory_handle handle_ptr)
456 for (i = 0 ; i < memory_ptr->pools_count ; i++)
458 rtsafe_memory_pool_sleepy(memory_ptr->pools[i].pool);
/* Return a chunk obtained from rtsafe_memory_allocate().  The owning pool
 * handle was stored one slot before the user data; dereference it for the
 * pool argument and pass the chunk start (handle slot included) back to
 * the pool.  NOTE(review): the parameter list is elided in this listing. */
463 rtsafe_memory_deallocate(
466 LOG_DEBUG("rtsafe_memory_deallocate(%p) called.", data);
467 rtsafe_memory_pool_deallocate(
468 *((rtsafe_memory_pool_handle *)data -1),
469 (rtsafe_memory_pool_handle *)data - 1);