Prusa MINI Firmware overview
mem.c File Reference
#include "lwip/opt.h"
#include "lwip/mem.h"
#include "lwip/def.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"
#include <string.h>

Classes

struct  mem
 

Macros

#define MIN_SIZE   12
 
#define MIN_SIZE_ALIGNED   LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
 
#define SIZEOF_STRUCT_MEM   LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
 
#define MEM_SIZE_ALIGNED   LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
 
#define LWIP_RAM_HEAP_POINTER   ram_heap
 
#define LWIP_MEM_FREE_DECL_PROTECT()
 
#define LWIP_MEM_FREE_PROTECT()   sys_mutex_lock(&mem_mutex)
 
#define LWIP_MEM_FREE_UNPROTECT()   sys_mutex_unlock(&mem_mutex)
 
#define LWIP_MEM_ALLOC_DECL_PROTECT()
 
#define LWIP_MEM_ALLOC_PROTECT()
 
#define LWIP_MEM_ALLOC_UNPROTECT()
 

Functions

 LWIP_DECLARE_MEMORY_ALIGNED (ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM))
 
static void plug_holes (struct mem *mem)
 
void mem_init (void)
 
void mem_free (void *rmem)
 
void * mem_trim (void *rmem, mem_size_t newsize)
 
void * mem_malloc (mem_size_t size)
 
void * mem_calloc (mem_size_t count, mem_size_t size)
 

Variables

static u8_t * ram
 
static struct mem * ram_end
 
static struct mem * lfree
 
static sys_mutex_t mem_mutex
 

Detailed Description

Dynamic memory manager

This is a lightweight replacement for the standard C library malloc().

If you want to use the standard C library malloc() instead, define MEM_LIBC_MALLOC to 1 in your lwipopts.h

To let mem_malloc() use pools (prevents fragmentation and is much faster than a heap but might waste some memory), define MEM_USE_POOLS to 1, define MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list of pools like this (more pools can be added between _START and _END):

Define three pools with sizes 256, 512, and 1512 bytes:

LWIP_MALLOC_MEMPOOL_START
LWIP_MALLOC_MEMPOOL(20, 256)
LWIP_MALLOC_MEMPOOL(10, 512)
LWIP_MALLOC_MEMPOOL(5, 1512)
LWIP_MALLOC_MEMPOOL_END
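
In lwipopts.h, the options that select this pool-based allocation would then look roughly as follows. This is only a sketch: the option names (MEM_LIBC_MALLOC, MEM_USE_POOLS, MEMP_USE_CUSTOM_POOLS) come from lwIP's opt.h, but the values shown are illustrative and not taken from the Prusa MINI configuration.

/* lwipopts.h -- illustrative values only */
#define MEM_LIBC_MALLOC        0   /* keep this allocator instead of the C library malloc() */
#define MEM_USE_POOLS          1   /* let mem_malloc() draw from fixed-size pools */
#define MEMP_USE_CUSTOM_POOLS  1   /* the pool list above goes into "lwippools.h" */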

Macro Definition Documentation

◆ MIN_SIZE

#define MIN_SIZE   12

All allocated blocks will be MIN_SIZE bytes big, at least! MIN_SIZE can be overridden to suit your needs. Smaller values save space; larger values could prevent very small blocks from fragmenting the RAM too much.
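
As a rough illustration, assuming the standard lwIP definition of LWIP_MEM_ALIGN_SIZE (round the size up to the next multiple of MEM_ALIGNMENT):

/* assumed (standard lwIP) rounding, shown only for illustration */
#define LWIP_MEM_ALIGN_SIZE(size)   (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT - 1U))

/* MEM_ALIGNMENT == 4: MIN_SIZE_ALIGNED == LWIP_MEM_ALIGN_SIZE(12) == 12 */
/* MEM_ALIGNMENT == 8: MIN_SIZE_ALIGNED == LWIP_MEM_ALIGN_SIZE(12) == 16 */

Every request smaller than MIN_SIZE_ALIGNED is rounded up to it before a block is carved out of the heap.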

◆ MIN_SIZE_ALIGNED

#define MIN_SIZE_ALIGNED   LWIP_MEM_ALIGN_SIZE(MIN_SIZE)

◆ SIZEOF_STRUCT_MEM

#define SIZEOF_STRUCT_MEM   LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))

◆ MEM_SIZE_ALIGNED

#define MEM_SIZE_ALIGNED   LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

◆ LWIP_RAM_HEAP_POINTER

#define LWIP_RAM_HEAP_POINTER   ram_heap

◆ LWIP_MEM_FREE_DECL_PROTECT

#define LWIP_MEM_FREE_DECL_PROTECT ( )

◆ LWIP_MEM_FREE_PROTECT

#define LWIP_MEM_FREE_PROTECT ( )    sys_mutex_lock(&mem_mutex)

◆ LWIP_MEM_FREE_UNPROTECT

#define LWIP_MEM_FREE_UNPROTECT ( )    sys_mutex_unlock(&mem_mutex)

◆ LWIP_MEM_ALLOC_DECL_PROTECT

#define LWIP_MEM_ALLOC_DECL_PROTECT ( )

◆ LWIP_MEM_ALLOC_PROTECT

#define LWIP_MEM_ALLOC_PROTECT ( )

◆ LWIP_MEM_ALLOC_UNPROTECT

#define LWIP_MEM_ALLOC_UNPROTECT ( )

Function Documentation

◆ LWIP_DECLARE_MEMORY_ALIGNED()

LWIP_DECLARE_MEMORY_ALIGNED ( ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM) )

If you want to relocate the heap to external memory, simply define LWIP_RAM_HEAP_POINTER as a void pointer to that location. If you do, make sure the memory at that location is big enough (see below for how that space is calculated): the heap needs one extra struct mem at the end and some room for alignment.
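
A hedged sketch of such a relocation via lwipopts.h; the buffer name ext_ram_heap and its placement in external memory are hypothetical, only LWIP_RAM_HEAP_POINTER and the size formula above come from this file:

/* lwipopts.h -- illustrative heap relocation to external RAM */
/* ext_ram_heap is a hypothetical, suitably aligned buffer that the linker places
 * in external memory; it must provide at least
 * MEM_SIZE_ALIGNED + 2U * SIZEOF_STRUCT_MEM bytes, matching the declaration above. */
extern unsigned char ext_ram_heap[];
#define LWIP_RAM_HEAP_POINTER   ((void *)ext_ram_heap)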

◆ plug_holes()

static void plug_holes ( struct mem * mem )
static

"Plug holes" by combining adjacent empty struct mems. After this function is through, there should not exist one empty struct mem pointing to another empty struct mem.

Parameters
mem    this points to a struct mem which has just been freed
345 {
346  struct mem *nmem;
347  struct mem *pmem;
348 
349  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
350  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
351  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);
352 
353  /* plug hole forward */
354  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);
355 
356  nmem = (struct mem *)(void *)&ram[mem->next];
357  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
358  /* if mem->next is unused and not end of ram, combine mem and mem->next */
359  if (lfree == nmem) {
360  lfree = mem;
361  }
362  mem->next = nmem->next;
363  ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
364  }
365 
366  /* plug hole backward */
367  pmem = (struct mem *)(void *)&ram[mem->prev];
368  if (pmem != mem && pmem->used == 0) {
369  /* if mem->prev is unused, combine mem and mem->prev */
370  if (lfree == mem) {
371  lfree = pmem;
372  }
373  pmem->next = mem->next;
374  ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
375  }
376 }
Here is the caller graph for this function:

◆ mem_init()

void mem_init ( void  )

Zero the heap and initialize start, end and lowest-free

383 {
384  struct mem *mem;
385 
386  LWIP_ASSERT("Sanity check alignment",
387  (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
388 
389  /* align the heap */
390  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
391  /* initialize the start of the heap */
392  mem = (struct mem *)(void *)ram;
393  mem->next = MEM_SIZE_ALIGNED;
394  mem->prev = 0;
395  mem->used = 0;
396  /* initialize the end of the heap */
397  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
398  ram_end->used = 1;
399  ram_end->next = MEM_SIZE_ALIGNED;
400  ram_end->prev = MEM_SIZE_ALIGNED;
401 
402  /* initialize the lowest-free pointer to the start of the heap */
403  lfree = (struct mem *)(void *)ram;
404 
405  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
406 
407  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
408  LWIP_ASSERT("failed to create mem_mutex", 0);
409  }
410 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ mem_free()

void mem_free ( void * rmem )

Put a struct mem back on the heap

Parameters
rmem    is the data portion of a struct mem as returned by a previous call to mem_malloc()
420 {
421  struct mem *mem;
423 
424  if (rmem == NULL) {
425  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
426  return;
427  }
428  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);
429 
430  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
431  (u8_t *)rmem < (u8_t *)ram_end);
432 
433  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
435  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
436  /* protect mem stats from concurrent access */
437  SYS_ARCH_PROTECT(lev);
438  MEM_STATS_INC(illegal);
439  SYS_ARCH_UNPROTECT(lev);
440  return;
441  }
442  /* protect the heap from concurrent access */
444  /* Get the corresponding struct mem ... */
445  /* cast through void* to get rid of alignment warnings */
446  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
447  /* ... which has to be in a used state ... */
448  LWIP_ASSERT("mem_free: mem->used", mem->used);
449  /* ... and is now unused. */
450  mem->used = 0;
451 
452  if (mem < lfree) {
453  /* the newly freed struct is now the lowest */
454  lfree = mem;
455  }
456 
458 
459  /* finally, see if prev or next are free also */
460  plug_holes(mem);
461 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
462  mem_free_count = 1;
463 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
465 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ mem_trim()

void* mem_trim ( void * rmem,
mem_size_t  newsize 
)

Shrink memory returned by mem_malloc().

Parameters
rmem    pointer to memory allocated by mem_malloc() that is to be shrunk
newsize    required size after shrinking (needs to be smaller than or equal to the previous size)
Returns
for compatibility reasons: at the moment this is always == rmem; or NULL if newsize is > the old size, in which case rmem is NOT touched or freed!
479 {
480  mem_size_t size;
481  mem_size_t ptr, ptr2;
482  struct mem *mem, *mem2;
483  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
485 
486  /* Expand the size of the allocated memory region so that we can
487  adjust for alignment. */
488  newsize = LWIP_MEM_ALIGN_SIZE(newsize);
489 
490  if (newsize < MIN_SIZE_ALIGNED) {
491  /* every data block must be at least MIN_SIZE_ALIGNED long */
492  newsize = MIN_SIZE_ALIGNED;
493  }
494 
495  if (newsize > MEM_SIZE_ALIGNED) {
496  return NULL;
497  }
498 
499  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
500  (u8_t *)rmem < (u8_t *)ram_end);
501 
502  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
504  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
505  /* protect mem stats from concurrent access */
506  SYS_ARCH_PROTECT(lev);
507  MEM_STATS_INC(illegal);
508  SYS_ARCH_UNPROTECT(lev);
509  return rmem;
510  }
511  /* Get the corresponding struct mem ... */
512  /* cast through void* to get rid of alignment warnings */
513  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
514  /* ... and its offset pointer */
515  ptr = (mem_size_t)((u8_t *)mem - ram);
516 
517  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
518  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
519  if (newsize > size) {
520  /* not supported */
521  return NULL;
522  }
523  if (newsize == size) {
524  /* No change in size, simply return */
525  return rmem;
526  }
527 
528  /* protect the heap from concurrent access */
530 
531  mem2 = (struct mem *)(void *)&ram[mem->next];
532  if (mem2->used == 0) {
533  /* The next struct is unused, we can simply move it a little */
534  mem_size_t next;
535  /* remember the old next pointer */
536  next = mem2->next;
537  /* create new struct mem which is moved directly after the shrinked mem */
538  ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
539  if (lfree == mem2) {
540  lfree = (struct mem *)(void *)&ram[ptr2];
541  }
542  mem2 = (struct mem *)(void *)&ram[ptr2];
543  mem2->used = 0;
544  /* restore the next pointer */
545  mem2->next = next;
546  /* link it back to mem */
547  mem2->prev = ptr;
548  /* link mem to it */
549  mem->next = ptr2;
550  /* last thing to restore linked list: as we have moved mem2,
551  * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
552  * the end of the heap */
553  if (mem2->next != MEM_SIZE_ALIGNED) {
554  ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
555  }
556  MEM_STATS_DEC_USED(used, (size - newsize));
557  /* no need to plug holes, we've already done that */
558  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
559  /* Next struct is used but there's room for another struct mem with
560  * at least MIN_SIZE_ALIGNED of data.
561  * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
562  * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
563  * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
564  * region that couldn't hold data, but when mem->next gets freed,
565  * the 2 regions would be combined, resulting in more free memory */
566  ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
567  mem2 = (struct mem *)(void *)&ram[ptr2];
568  if (mem2 < lfree) {
569  lfree = mem2;
570  }
571  mem2->used = 0;
572  mem2->next = mem->next;
573  mem2->prev = ptr;
574  mem->next = ptr2;
575  if (mem2->next != MEM_SIZE_ALIGNED) {
576  ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
577  }
578  MEM_STATS_DEC_USED(used, (size - newsize));
579  /* the original mem->next is used, so no need to plug holes! */
580  }
581  /* else {
582  next struct mem is used but size between mem and mem2 is not big enough
583  to create another struct mem
584  -> don't do anything.
585  -> the remaining space stays unused since it is too small
586  } */
587 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
588  mem_free_count = 1;
589 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
591  return rmem;
592 }
Here is the caller graph for this function:
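
A short usage sketch under assumptions not taken from this file: build_message() is a hypothetical helper and 512 an arbitrary worst-case size. The point is only that mem_trim() hands the unused tail back to the heap and, per the return-value note above, currently always returns the original pointer.

#include "lwip/mem.h"

/* hypothetical helper: fills buf (at most max_len bytes) and returns the bytes actually used */
extern mem_size_t build_message(char *buf, mem_size_t max_len);

static void trim_example(void)
{
    mem_size_t used;
    char *buf = (char *)mem_malloc(512);   /* allocate for the worst case */

    if (buf == NULL) {
        return;                            /* heap exhausted */
    }
    used = build_message(buf, 512);
    buf = (char *)mem_trim(buf, used);     /* shrink in place; the freed tail becomes available again */
    /* ... use buf ... */
    mem_free(buf);
}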

◆ mem_malloc()

void* mem_malloc ( mem_size_t  size)

Allocate a block of memory with a minimum of 'size' bytes.

Parameters
size    is the minimum size of the requested block in bytes.
Returns
pointer to allocated memory or NULL if no free memory was found.

Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).

604 {
605  mem_size_t ptr, ptr2;
606  struct mem *mem, *mem2;
607 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
608  u8_t local_mem_free_count = 0;
609 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
611 
612  if (size == 0) {
613  return NULL;
614  }
615 
616  /* Expand the size of the allocated memory region so that we can
617  adjust for alignment. */
618  size = LWIP_MEM_ALIGN_SIZE(size);
619 
620  if (size < MIN_SIZE_ALIGNED) {
621  /* every data block must be at least MIN_SIZE_ALIGNED long */
622  size = MIN_SIZE_ALIGNED;
623  }
624 
625  if (size > MEM_SIZE_ALIGNED) {
626  return NULL;
627  }
628 
629  /* protect the heap from concurrent access */
632 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
633  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
634  do {
635  local_mem_free_count = 0;
636 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
637 
638  /* Scan through the heap searching for a free block that is big enough,
639  * beginning with the lowest free block.
640  */
641  for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
642  ptr = ((struct mem *)(void *)&ram[ptr])->next) {
643  mem = (struct mem *)(void *)&ram[ptr];
644 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
645  mem_free_count = 0;
647  /* allow mem_free or mem_trim to run */
649  if (mem_free_count != 0) {
650  /* If mem_free or mem_trim have run, we have to restart since they
651  could have altered our current struct mem. */
652  local_mem_free_count = 1;
653  break;
654  }
655 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
656 
657  if ((!mem->used) &&
658  (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
659  /* mem is not used and at least perfect fit is possible:
660  * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
661 
662  if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= ((mem_size_t)size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
663  /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
664  * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
665  * -> split large block, create empty remainder,
666  * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
667  * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
668  * struct mem would fit in but no data between mem2 and mem2->next
669  * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
670  * region that couldn't hold data, but when mem->next gets freed,
671  * the 2 regions would be combined, resulting in more free memory
672  */
673  ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
674  /* create mem2 struct */
675  mem2 = (struct mem *)(void *)&ram[ptr2];
676  mem2->used = 0;
677  mem2->next = mem->next;
678  mem2->prev = ptr;
679  /* and insert it between mem and mem->next */
680  mem->next = ptr2;
681  mem->used = 1;
682 
683  if (mem2->next != MEM_SIZE_ALIGNED) {
684  ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
685  }
687  } else {
688  /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
689  * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
690  * taken care of this).
691  * -> near fit or exact fit: do not split, no mem2 creation
692  * also can't move mem->next directly behind mem, since mem->next
693  * will always be used at this point!
694  */
695  mem->used = 1;
697  }
698 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
699 mem_malloc_adjust_lfree:
700 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
701  if (mem == lfree) {
702  struct mem *cur = lfree;
703  /* Find next free block after mem and update lowest free pointer */
704  while (cur->used && cur != ram_end) {
705 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
706  mem_free_count = 0;
708  /* prevent high interrupt latency... */
710  if (mem_free_count != 0) {
711  /* If mem_free or mem_trim have run, we have to restart since they
712  could have altered our current struct mem or lfree. */
713  goto mem_malloc_adjust_lfree;
714  }
715 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
716  cur = (struct mem *)(void *)&ram[cur->next];
717  }
718  lfree = cur;
719  LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
720  }
723  LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
725  LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
727  LWIP_ASSERT("mem_malloc: sanity check alignment",
728  (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
729 
730  return (u8_t *)mem + SIZEOF_STRUCT_MEM;
731  }
732  }
733 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
734  /* if we got interrupted by a mem_free, try again */
735  } while (local_mem_free_count != 0);
736 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
737  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
738  MEM_STATS_INC(err);
741  return NULL;
742 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ mem_calloc()

void* mem_calloc ( mem_size_t  count,
mem_size_t  size 
)

Contiguously allocates enough space for count objects that are size bytes of memory each and returns a pointer to the allocated memory.

The allocated memory is filled with bytes of value zero.

Parameters
count    number of objects to allocate
size    size of the objects to allocate
Returns
pointer to allocated memory / NULL pointer if there is an error
766 {
767  void *p;
768 
769  /* allocate 'count' objects of size 'size' */
770  p = mem_malloc(count * size);
771  if (p) {
772  /* zero the memory */
773  memset(p, 0, (size_t)count * (size_t)size);
774  }
775  return p;
776 }
Here is the call graph for this function:
Here is the caller graph for this function:
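
A short usage sketch; the element count and type here are illustrative only:

#include "lwip/mem.h"

static void calloc_example(void)
{
    /* eight zero-filled 16-bit counters from the lwIP heap */
    u16_t *counters = (u16_t *)mem_calloc(8, sizeof(u16_t));

    if (counters != NULL) {
        counters[3]++;      /* the memory arrives already zeroed, so this yields 1 */
        mem_free(counters);
    }
}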

Variable Documentation

◆ ram

u8_t* ram
static

pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array

◆ ram_end

struct mem* ram_end
static

the last entry, always unused!

◆ lfree

struct mem* lfree
static

pointer to the lowest free block, this is used for faster search

◆ mem_mutex

sys_mutex_t mem_mutex
static

concurrent access protection
