nckernel  0.1
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
malloc.c
Go to the documentation of this file.
1 #include <sys/types.h>
2 #include <stdio.h>
3 #include <stddef.h>
4 #include <stdint.h>
5 #include <stdlib.h>
6 #include <string.h> /* memset */
7 #include <unistd.h>
8 #include <errno.h>
9 #include <malloc.h>
10 
11 #include <list.h>
12 
13 #include <debug.h>
14 #include <interrupt.h>
15 
/* True when 'next' begins exactly where 'prev''s payload ends, i.e. the two
 * chunks are physically adjacent in memory and may be coalesced into one.
 * Both arguments are evaluated more than once — pass side-effect-free
 * expressions only. */
#define CAN_BE_MERGED(prev, next) \
	((struct chunk*)(((struct chunk*)(prev))->ptr + ((struct chunk*)(prev))->size) == next)
25 
/* Round 'size' up to a multiple of 'align' (align must be a power of two).
 * NOTE(review): this form is NOT idempotent — an already-aligned value is
 * bumped by a full 'align' step (e.g. ALIGNED(16, 16) == 32).  Both
 * posix_memalign() and find_item() compute aligned payload addresses with
 * this exact behaviour, and malloc() relies on the sbrk request always
 * exceeding chunk_size, so switching to the canonical idempotent form
 * ((size + align - 1) & ~(align - 1)) would need coordinated changes in
 * every caller.  Both macro arguments are evaluated more than once. */
#define ALIGNED(align, size) \
	((uint32_t)((uint32_t)(size) + ((align) - ((uint32_t)(size) & ((align) - 1)))))
28 
/* A heap chunk: an intrusive list link followed immediately by the payload.
 * The header lives in-band, directly in front of the bytes handed to the
 * caller.
 * NOTE(review): the 'size' member used throughout this file (item->size,
 * the payload byte count) sits on a line elided by this rendering
 * (original line 32) — confirm its exact type against the full source. */
struct chunk {
	struct list_head head;	/* links the chunk into __used or __free */
	char ptr[];		/* flexible array member: first payload byte */
} __PACKED;
35 
/* Chunks currently handed out to callers. */
static LIST_HEAD(__used);

/* Chunks available for allocation; free() keeps this list sorted by address
 * so physically adjacent chunks can be coalesced. */
static LIST_HEAD(__free);
41 
/* Heap-wide bookkeeping, set up by the init routine below. */
static struct chunk_stat {
	unsigned long base;		/* address of the first chunk header */
	struct {
		unsigned long total;	/* payload capacity of the arena */
		unsigned long used;	/* bytes currently allocated.
					 * NOTE(review): malloc()'s exact-fit
					 * path never increments this while
					 * free() always decrements it, so the
					 * counter can drift — verify. */
	} size;
} s_stat;
49 
50 static inline struct chunk *find_item(void *ptr, uint32_t align)
51 {
52  struct list_head *pos;
53  struct chunk *item;
54 
55  list_for_each(pos, &__used) {
56  item = list_entry(pos, struct chunk, head);
57  if ((uint32_t)item < (uint32_t)ptr && (ALIGNED(align, item->ptr) == (uint32_t)ptr)) {
58  return item;
59  }
60  }
61 
62  return NULL;
63 }
64 
/* NOTE(review): the signature line of this function (original line 65) is
 * elided by this rendering; from the body it takes the arena base address
 * and its total byte size — confirm name and parameter types against the
 * full source. */
{
	struct chunk *item;

	/* Lay a single chunk header at the very start of the arena; its
	 * payload is everything after the header. */
	item = (struct chunk*)base;
	item->size = size - sizeof(*item);

	/* Seed the bookkeeping: heap origin, capacity, nothing in use yet.
	 * Note 'total' excludes the first header's bytes. */
	s_stat.base = (unsigned long)base;
	s_stat.size.total = item->size;
	s_stat.size.used = 0;

	/* The whole arena starts life as one entry on the free list. */
	list_add_tail(&item->head, &__free);
}
78 
/* Allocate 'size' bytes from the kernel heap.
 * First-fit scan of the free list; when nothing fits, the heap is grown
 * with sbrk() (request rounded up past a page via ALIGNED) and the scan is
 * retried until sbrk() itself fails.  Returns the payload pointer, or NULL
 * when the heap cannot be grown. */
void *malloc(size_t size)
{
	struct chunk *item;
	struct list_head *pos;
	struct list_head *n;
	int chunk_size;			/* payload + header bytes.
					 * NOTE(review): 'int' truncates very
					 * large size_t requests — worth a
					 * guard. */
	register int internal_fragment;	/* bytes left over if this chunk is used */
	uint32_t align;
	unsigned long flags;

	/* The heap is shared with interrupt context: run with IRQs masked. */
	irq_local_save(&flags);

	align = sysconf(_SC_PAGESIZE);
	chunk_size = size + sizeof(*item);

	do {
		/* First-fit scan over the free list. */
		list_for_each_safe(pos, n, &__free) {
			item = list_entry(pos, struct chunk, head);

			internal_fragment = item->size - chunk_size;
			if (internal_fragment < 0) {
				continue;	/* too small, keep looking */
			}

			if (internal_fragment <= sizeof(*item)) {
				/* Remainder too small to hold another header:
				 * hand out the whole chunk.
				 * NOTE(review): this path does not add to
				 * s_stat.size.used, yet free() always
				 * subtracts — the counter can drift. */
				list_del(pos);
			} else {
				/* Split: carve the allocation off the TAIL of
				 * the free chunk so its list node stays put.
				 * (Original lines 108-118 are elided by this
				 * rendering — presumably a comment block;
				 * confirm against the full source.) */
				struct chunk *new_item;
				item->size -= chunk_size;
				new_item = (struct chunk *)(item->ptr + item->size);
				new_item->size = size;
				item = new_item;

				s_stat.size.used += chunk_size;
			}

			list_add_tail(&item->head, &__used);
			irq_local_restore(flags);
			return item->ptr;
		}
		/* Grow the heap by strictly more than chunk_size (ALIGNED is
		 * non-idempotent) and rescan. */
	} while (sbrk(ALIGNED(align, chunk_size)));

	/* Out of memory and sbrk() refused to extend the heap. */
	irq_local_restore(flags);
	return NULL;
}
138 
/* Allocate a zero-initialized array of 'nmemb' elements of 'size' bytes.
 * Returns NULL when either count is zero, when nmemb*size would overflow
 * size_t (CERT MEM07-C: the old 'size *= nmemb' silently wrapped and could
 * allocate far less than requested), or when malloc() fails. */
void *calloc(size_t nmemb, size_t size)
{
	void *ret;
	size_t total;

	if (!nmemb || !size) {
		return NULL;
	}

	/* Reject multiplications that wrap around SIZE_MAX. */
	if (nmemb > SIZE_MAX / size) {
		return NULL;
	}
	total = nmemb * size;

	ret = malloc(total);
	if (!ret) {
		return NULL;
	}

	memset(ret, 0, total);
	return ret;
}
164 
165 int posix_memalign(void **memptr, size_t align, size_t size)
166 {
167  struct chunk *item;
168  struct list_head *pos;
169  struct list_head *n;
170  uint32_t chunk_size;
171  register int internal_fragment;
172  unsigned long flags;
173 
174  irq_local_save(&flags);
176 
177  do {
178  list_for_each_safe(pos, n, &__free) {
179  item = list_entry(pos, struct chunk, head);
180 
181  chunk_size = ALIGNED(align, item->ptr);
182  chunk_size -= (uint32_t)item->ptr;
183  chunk_size += size + sizeof(*item);
184 
185  internal_fragment = item->size - chunk_size;
186  if (internal_fragment < 0) {
187  continue;
188  }
189 
190  if (internal_fragment <= sizeof(*item)) {
191  list_del(pos);
192  } else {
193  struct chunk *new_item;
194 
195  item->size -= chunk_size;
196  new_item = (struct chunk *)(item->ptr + item->size);
197  new_item->size = size;
198  item = new_item;
199 
200  s_stat.size.used += chunk_size;
201  }
202 
203  list_add_tail(&item->head, &__used);
204 
205  *memptr = (void *)ALIGNED(align, item->ptr);
207  irq_local_restore(flags);
208  return 0;
209  }
210  } while (sbrk(ALIGNED(align, chunk_size)));
211 
213  irq_local_restore(flags);
214  return -ENOMEM;
215 }
216 
/* Return 'ptr' to the heap, coalescing with physically adjacent free
 * chunks; the free list is kept sorted by address.
 * NOTE(review): several original lines (258-262, 266-270, 284-288) are
 * elided by this rendering; the splice logic below may not be complete —
 * confirm against the full source before changing anything here.
 * NOTE(review): if the loop finishes without finding a higher-addressed
 * neighbour and without merging (e.g. __free is empty, or 'item' is the
 * highest chunk and not adjacent), 'item' is never re-inserted into
 * __free — verify whether elided lines handle this. */
void free(void *ptr)
{
	struct list_head *pos;
	struct list_head *n;
	struct chunk *item;
	struct chunk *tmp;
	int merged;
	uint32_t align;
	unsigned long flags;

	irq_local_save(&flags);

	item = NULL;

	/* A page-aligned pointer may come from posix_memalign(), where the
	 * payload does not start right after the header; look it up in the
	 * used list instead of trusting container_of().
	 * NOTE(review): ALIGNED() is not idempotent, so for an exactly
	 * page-aligned ptr this equality can never hold and memalign'd
	 * blocks fall through to container_of() below, which then reads a
	 * bogus header.  ((uint32_t)ptr & (align - 1)) == 0 looks like the
	 * intended test — verify. */
	align = sysconf(_SC_PAGESIZE);
	if (ALIGNED(align, ptr) == (uint32_t)ptr) {
		item = find_item(ptr, align);
	}

	/* Ordinary malloc() pointer: header sits immediately before it. */
	if (!item) {
		item = container_of(ptr, struct chunk, ptr);
	}

	list_del(&item->head);

	s_stat.size.used -= (item->size + sizeof(*item));

	/* Walk the address-sorted free list: absorb the lower neighbour
	 * (tmp < item), absorb the higher neighbour (tmp > item), or splice
	 * the chunk in at its address position. */
	merged = 0;
	list_for_each_safe(pos, n, &__free) {
		tmp = list_entry(pos, struct chunk, head);

		if (tmp > item) {
			if (CAN_BE_MERGED(item, tmp)) {
				/* 'item' absorbs the higher neighbour and,
				 * if not already linked by an earlier merge,
				 * takes over tmp's list position. */
				item->size += tmp->size + sizeof(*tmp);
				tmp->head.next->prev = &item->head;
				if (!merged) {
					tmp->head.prev->next = &item->head;
					item->head = tmp->head;
					merged = 1;
				}
				continue;
			}

			if (!merged) {
				/* No merge possible: insert before 'tmp' to
				 * keep the list sorted by address. */
				tmp->head.prev->next = &item->head;
				item->head.prev = tmp->head.prev;

				item->head.next = &tmp->head;
				tmp->head.prev = &item->head;
			}

			break;
		}

		/* Lower neighbour ends exactly where 'item' begins: let the
		 * already-listed 'tmp' absorb it. */
		if (CAN_BE_MERGED(tmp, item)) {
			tmp->size += item->size + sizeof(*item);
			item = tmp;
			merged = 1;
			continue;
		}
	}

	irq_local_restore(flags);
}
296 
/* Move the heap break to 'ptr'.  Shrinks or grows the arena by resizing
 * the trailing free chunk in place, or by appending a brand-new free chunk
 * when the tail of the heap is in use.  Returns 0 on success, -EBUSY when
 * the request would cut into allocated memory. */
int brk(void *ptr)
{
	unsigned long total;	/* requested capacity relative to base */
	unsigned long boundary;	/* current end of the arena */
	struct chunk *item;
	unsigned long flags;

	irq_local_save(&flags);

	total = (unsigned long)ptr - s_stat.base;
	if (s_stat.size.used > total) {
		/* Cannot shrink below what is currently allocated. */
		irq_local_restore(flags);
		return -EBUSY;
	}

	boundary = s_stat.base + s_stat.size.total;
	/* NOTE(review): when __free is empty this reads a bogus "chunk"
	 * through the list head itself — verify callers guarantee a
	 * non-empty free list. */
	item = list_entry(__free.prev, struct chunk, head);

	/* Does the highest free chunk end exactly at the boundary?
	 * NOTE(review): the initial chunk satisfies this because
	 * s_stat.size.total excludes its header, but chunks appended by
	 * sbrk() end sizeof(*item) short of this expression and never
	 * match — confirm which convention is intended. */
	if (((unsigned long)item + item->size) == boundary) {
		ssize_t tmp;
		/* Resize the trailing free chunk in place. */
		tmp = item->size + total - s_stat.size.total;
		if (tmp < 0) {
			irq_local_restore(flags);
			return -EBUSY;
		}

		item->size = tmp;
		s_stat.size.total = total;
	} else {
		/* Create a new free chunk and add it to the free-chunk
		 * list.  (Translated from the original Korean comment.) */
		if ((unsigned long)ptr <= boundary) {
			irq_local_restore(flags);
			return -EBUSY;
		}

		item = (struct chunk*)boundary;
		item->size = (ssize_t)((unsigned long)ptr - boundary);
		item->size -= sizeof(*item);

		/* NOTE(review): s_stat.size.total is not updated on this
		 * path, so 'boundary' is stale on the next call — verify. */
		list_add_tail(&item->head, &__free);
	}

	irq_local_restore(flags);
	return 0;
}
349 
/* Grow (or shrink) the heap by 'increment' bytes.  Extends the trailing
 * free chunk in place when it reaches the boundary, otherwise appends a
 * new free chunk at the boundary.  Returns the new boundary address, or
 * NULL when the request would make a chunk size negative. */
void *sbrk(intptr_t increment)
{
	unsigned long boundary;	/* current end of the arena */
	struct chunk *item;
	unsigned long flags;

	irq_local_save(&flags);

	boundary = s_stat.base + s_stat.size.total;
	/* NOTE(review): when __free is empty this reads a bogus "chunk"
	 * through the list head itself — verify. */
	item = list_entry(__free.prev, struct chunk, head);

	/* Same boundary convention caveat as brk(): chunks appended below
	 * never satisfy this test on later calls — verify. */
	if (((unsigned long)item + item->size) == boundary) {
		if ((item->size + increment) < 0) {
			/* Shrinking past the trailing free chunk. */
			irq_local_restore(flags);
			return NULL;
		}

		item->size += increment;
	} else {
		if (increment <0) {
			/* Cannot shrink: the tail of the heap is in use. */
			irq_local_restore(flags);
			return NULL;
		}

		/* Append a fresh free chunk at the old boundary. */
		item = (struct chunk*)boundary;
		item->size = increment - sizeof(*item);
		list_add_tail(&item->head, &__free);
	}
	boundary += increment;

	/* NOTE(review): s_stat.size.total is never updated here, so the
	 * boundary computed on the next brk()/sbrk() call is stale —
	 * verify against the full source. */
	irq_local_restore(flags);
	return (void*)boundary;
}
387 
/* Page-aligned allocation: a thin convenience wrapper that delegates to
 * memalign() with the system page size as the alignment. */
void *valloc(size_t size)
{
	long page_size = sysconf(_SC_PAGESIZE);

	return memalign(page_size, size);
}
392 
/* Allocate 'size' bytes aligned to 'boundary' bytes.
 * Returns the aligned pointer, or NULL on failure.
 * posix_memalign() signals failure with a nonzero status: this file's
 * implementation returns -ENOMEM, but the POSIX convention is a POSITIVE
 * errno value.  The old '< 0' test let a positive error slip through and
 * returned an indeterminate pointer; '!= 0' handles both conventions. */
void *memalign(size_t boundary, size_t size)
{
	void *ptr = NULL;	/* stays NULL on any failure path */

	if (posix_memalign(&ptr, boundary, size) != 0) {
		return NULL;
	}

	return ptr;
}
403 
404 /*
405 int chunk_stat_total(void)
406 {
407  return s_stat.size.total;
408 }
409 
410 int chunk_stat_used(void)
411 {
412  return s_stat.size.used;
413 }
414 
415 unsigned long chunk_stat_base(void)
416 {
417  return s_stat.base;
418 }
419 */
420 
421 /* End of a file */