Prusa MINI Firmware overview
Packet buffers (PBUF)

Enumerations

enum  pbuf_layer {
  PBUF_TRANSPORT, PBUF_IP, PBUF_LINK, PBUF_RAW_TX,
  PBUF_RAW
}
 
enum  pbuf_type { PBUF_RAM, PBUF_ROM, PBUF_REF, PBUF_POOL }
 

Functions

struct pbuf * pbuf_alloc (pbuf_layer layer, u16_t length, pbuf_type type)
 
void pbuf_realloc (struct pbuf *p, u16_t new_len)
 
u8_t pbuf_free (struct pbuf *p)
 
void pbuf_ref (struct pbuf *p)
 
void pbuf_cat (struct pbuf *h, struct pbuf *t)
 
void pbuf_chain (struct pbuf *h, struct pbuf *t)
 
err_t pbuf_copy (struct pbuf *p_to, const struct pbuf *p_from)
 
u16_t pbuf_copy_partial (const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
 
struct pbuf * pbuf_skip (struct pbuf *in, u16_t in_offset, u16_t *out_offset)
 
err_t pbuf_take (struct pbuf *buf, const void *dataptr, u16_t len)
 
err_t pbuf_take_at (struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
 
struct pbuf * pbuf_coalesce (struct pbuf *p, pbuf_layer layer)
 
u8_t pbuf_get_at (const struct pbuf *p, u16_t offset)
 
int pbuf_try_get_at (const struct pbuf *p, u16_t offset)
 
void pbuf_put_at (struct pbuf *p, u16_t offset, u8_t data)
 
u16_t pbuf_memcmp (const struct pbuf *p, u16_t offset, const void *s2, u16_t n)
 
u16_t pbuf_memfind (const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset)
 

Detailed Description

Packets are built from the pbuf data structure. It supports dynamic memory allocation for packet contents, or it can reference externally managed packet contents both in RAM and ROM. Quick allocation for incoming packets is provided through pools of fixed-size pbufs.

A packet may span over multiple pbufs, chained as a singly linked list. This is called a "pbuf chain".

Multiple packets may be queued, also using this singly linked list. This is called a "packet queue".

So, a packet queue consists of one or more pbuf chains, each of which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE NOT SUPPORTED!!! Use helper structs to queue multiple packets.

The differences between a pbuf chain and a packet queue are very precise but subtle.

The last pbuf of a packet has a ->tot_len field that equals the ->len field. It can be found by traversing the list. If the last pbuf of a packet has a ->next field other than NULL, more packets are on the queue.

Therefore, a loop over the pbufs of a single packet has the end condition (p->tot_len == p->len), NOT (p->next == NULL).
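
For illustration only (this helper is not part of the pbuf API), a minimal sketch of iterating over the pbufs of a single packet with this end condition, assuming the standard struct pbuf fields (len, tot_len, next) from lwip/pbuf.h:

/* Hypothetical helper: count the payload bytes of ONE packet (never a queue). */
static u16_t packet_payload_bytes(const struct pbuf *p)
{
  u16_t total = 0;
  const struct pbuf *q;
  for (q = p; q != NULL; q = q->next) {
    total += q->len;
    if (q->tot_len == q->len) {
      break;   /* last pbuf of this packet reached; do NOT test q->next == NULL */
    }
  }
  return total;
}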

Example of custom pbuf usage for zero-copy RX:

typedef struct my_custom_pbuf
{
  struct pbuf_custom p;
  void* dma_descriptor;
} my_custom_pbuf_t;

LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool");

void my_pbuf_free_custom(void* p)
{
  my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)p;

  LOCK_INTERRUPTS();
  free_rx_dma_descriptor(my_pbuf->dma_descriptor);
  LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf);
  UNLOCK_INTERRUPTS();
}

void eth_rx_irq()
{
  dma_descriptor*   dma_desc = get_RX_DMA_descriptor_from_ethernet();
  my_custom_pbuf_t* my_pbuf  = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL);

  my_pbuf->p.custom_free_function = my_pbuf_free_custom;
  my_pbuf->dma_descriptor         = dma_desc;

  invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length);

  struct pbuf* p = pbuf_alloced_custom(PBUF_RAW,
                                       dma_desc->rx_length,
                                       &my_pbuf->p,
                                       dma_desc->rx_data,
                                       dma_desc->max_buffer_size);

  if (netif->input(p, netif) != ERR_OK) {
    pbuf_free(p);
  }
}

Enumeration Type Documentation

◆ pbuf_layer

enum pbuf_layer

Enumeration of pbuf layers

Enumerator
PBUF_TRANSPORT 

Includes spare room for transport layer header, e.g. UDP header. Use this if you intend to pass the pbuf to functions like udp_send().

PBUF_IP 

Includes spare room for IP header. Use this if you intend to pass the pbuf to functions like raw_send().

PBUF_LINK 

Includes spare room for link layer header (ethernet header). Use this if you intend to pass the pbuf to functions like ethernet_output().

See also
PBUF_LINK_HLEN
PBUF_RAW_TX 

Includes spare room for additional encapsulation header before ethernet headers (e.g. 802.11). Use this if you intend to pass the pbuf to functions like netif->linkoutput().

See also
PBUF_LINK_ENCAPSULATION_HLEN
PBUF_RAW 

Use this for input packets in a netif driver when calling netif->input() in the most common case - ethernet-layer netif driver.

{
  /** Includes spare room for transport layer header, e.g. UDP header.
   * Use this if you intend to pass the pbuf to functions like udp_send().
   */
  PBUF_TRANSPORT,
  /** Includes spare room for IP header.
   * Use this if you intend to pass the pbuf to functions like raw_send().
   */
  PBUF_IP,
  /** Includes spare room for link layer header (ethernet header).
   * Use this if you intend to pass the pbuf to functions like ethernet_output().
   * @see PBUF_LINK_HLEN
   */
  PBUF_LINK,
  /** Includes spare room for additional encapsulation header before ethernet
   * headers (e.g. 802.11).
   * Use this if you intend to pass the pbuf to functions like netif->linkoutput().
   * @see PBUF_LINK_ENCAPSULATION_HLEN
   */
  PBUF_RAW_TX,
  /** Use this for input packets in a netif driver when calling netif->input()
   * in the most common case - ethernet-layer netif driver. */
  PBUF_RAW
} pbuf_layer;

◆ pbuf_type

enum pbuf_type

Enumeration of pbuf types

Enumerator
PBUF_RAM 

pbuf data is stored in RAM, used for TX mostly, struct pbuf and its payload are allocated in one piece of contiguous memory (so the first payload byte can be calculated from struct pbuf). pbuf_alloc() allocates PBUF_RAM pbufs as unchained pbufs (although that might change in future versions). This should be used for all OUTGOING packets (TX).

PBUF_ROM 

pbuf data is stored in ROM, i.e. struct pbuf and its payload are located in totally different memory areas. Since it points to ROM, payload does not have to be copied when queued for transmission.

PBUF_REF 

The struct pbuf is allocated from a pool (MEMP_PBUF) while the payload references externally allocated RAM. Much like PBUF_ROM, but the payload might change, so it has to be duplicated when queued before transmitting, depending on who has a 'ref' to it.

PBUF_POOL 

pbuf payload refers to RAM. This one comes from a pool and should be used for RX. Payload can be chained (scatter-gather RX) but like PBUF_RAM, struct pbuf and its payload are allocated in one piece of contiguous memory (so the first payload byte can be calculated from struct pbuf). Don't use this for TX, if the pool becomes empty e.g. because of TCP queuing, you are unable to receive TCP acks!

{
  /** pbuf data is stored in RAM, used for TX mostly, struct pbuf and its payload
      are allocated in one piece of contiguous memory (so the first payload byte
      can be calculated from struct pbuf).
      pbuf_alloc() allocates PBUF_RAM pbufs as unchained pbufs (although that might
      change in future versions).
      This should be used for all OUTGOING packets (TX).*/
  PBUF_RAM,
  /** pbuf data is stored in ROM, i.e. struct pbuf and its payload are located in
      totally different memory areas. Since it points to ROM, payload does not
      have to be copied when queued for transmission. */
  PBUF_ROM,
  /** pbuf comes from the pbuf pool. Much like PBUF_ROM but payload might change
      so it has to be duplicated when queued before transmitting, depending on
      who has a 'ref' to it. */
  PBUF_REF,
  /** pbuf payload refers to RAM. This one comes from a pool and should be used
      for RX. Payload can be chained (scatter-gather RX) but like PBUF_RAM, struct
      pbuf and its payload are allocated in one piece of contiguous memory (so
      the first payload byte can be calculated from struct pbuf).
      Don't use this for TX, if the pool becomes empty e.g. because of TCP queuing,
      you are unable to receive TCP acks! */
  PBUF_POOL
} pbuf_type;

Function Documentation

◆ pbuf_alloc()

struct pbuf* pbuf_alloc (pbuf_layer layer, u16_t length, pbuf_type type)

Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).

The actual memory allocated for the pbuf is determined by the layer at which the pbuf is allocated and the requested size (from the size parameter).

Parameters
layer   flag to define header size
length  size of the pbuf's payload
type    this parameter decides how and where the pbuf should be allocated as follows:
  • PBUF_RAM: buffer memory for pbuf is allocated as one large chunk. This includes protocol headers as well.
  • PBUF_ROM: no buffer memory is allocated for the pbuf, even for protocol headers. Additional headers must be prepended by allocating another pbuf and chain in to the front of the ROM pbuf. It is assumed that the memory used is really similar to ROM in that it is immutable and will not be changed. Memory which is dynamic should generally not be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
  • PBUF_REF: no buffer memory is allocated for the pbuf, even for protocol headers. It is assumed that the pbuf is only being used in a single thread. If the pbuf gets queued, then pbuf_take should be called to copy the buffer.
  • PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from the pbuf pool that is allocated during pbuf_init().
Returns
the allocated pbuf. If multiple pbufs were allocated, this is the first pbuf of a pbuf chain.
{
  struct pbuf *p, *q, *r;
  u16_t offset;
  s32_t rem_len; /* remaining length */
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));

  /* determine header offset */
  switch (layer) {
  case PBUF_TRANSPORT:
    /* add room for transport (often TCP) layer header */
    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
    break;
  case PBUF_IP:
    /* add room for IP layer header */
    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
    break;
  case PBUF_LINK:
    /* add room for link layer header */
    offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
    break;
  case PBUF_RAW_TX:
    /* add room for encapsulating link layer headers (e.g. 802.11) */
    offset = PBUF_LINK_ENCAPSULATION_HLEN;
    break;
  case PBUF_RAW:
    /* no offset (e.g. RX buffers or chain successors) */
    offset = 0;
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
    return NULL;
  }

  switch (type) {
  case PBUF_POOL:
    /* allocate head of pbuf chain into p */
    p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
    if (p == NULL) {
      PBUF_POOL_IS_EMPTY();
      return NULL;
    }
    p->type = type;
    p->next = NULL;

    /* make the payload pointer point 'offset' bytes into pbuf data memory */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset)));
    LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
                ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    /* the total length of the pbuf chain is the requested size */
    p->tot_len = length;
    /* set the length of the first pbuf in the chain */
    p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
    LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                ((u8_t*)p->payload + p->len <=
                 (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
    LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
                (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0);
    /* set reference count (needed here in case we fail) */
    p->ref = 1;

    /* now allocate the tail of the pbuf chain */

    /* remember first pbuf for linkage in next iteration */
    r = p;
    /* remaining length to be allocated */
    rem_len = length - p->len;
    /* any remaining pbufs to be allocated? */
    while (rem_len > 0) {
      q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
      if (q == NULL) {
        PBUF_POOL_IS_EMPTY();
        /* free chain so far allocated */
        pbuf_free(p);
        /* bail out unsuccessfully */
        return NULL;
      }
      q->type = type;
      q->flags = 0;
      q->next = NULL;
      /* make previous pbuf point to this pbuf */
      r->next = q;
      /* set total length of this pbuf and next in chain */
      LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
      q->tot_len = (u16_t)rem_len;
      /* this pbuf length is pool size, unless smaller sized tail */
      q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
      q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF);
      LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
                  ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
      LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                  ((u8_t*)p->payload + p->len <=
                   (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
      q->ref = 1;
      /* calculate remaining length to be allocated */
      rem_len -= q->len;
      /* remember this pbuf for linkage in next iteration */
      r = q;
    }
    /* end of chain */
    /*r->next = NULL;*/

    break;
  case PBUF_RAM:
    {
      mem_size_t alloc_len = LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length);

      /* bug #50040: Check for integer overflow when calculating alloc_len */
      if (alloc_len < LWIP_MEM_ALIGN_SIZE(length)) {
        return NULL;
      }

      /* If pbuf is to be allocated in RAM, allocate memory for it. */
      p = (struct pbuf*)mem_malloc(alloc_len);
    }

    if (p == NULL) {
      return NULL;
    }
    /* Set up internal structure of the pbuf. */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset));
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;

    LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
                ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    break;
  /* pbuf references existing (non-volatile static constant) ROM payload? */
  case PBUF_ROM:
  /* pbuf references existing (externally allocated) RAM payload? */
  case PBUF_REF:
    /* only allocate memory for the pbuf structure */
    p = (struct pbuf *)memp_malloc(MEMP_PBUF);
    if (p == NULL) {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
                  ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
                   (type == PBUF_ROM) ? "ROM" : "REF"));
      return NULL;
    }
    /* caller must set this field properly, afterwards */
    p->payload = NULL;
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
    return NULL;
  }
  /* set reference count */
  p->ref = 1;
  /* set flags */
  p->flags = 0;
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
  return p;
}
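
A usage sketch (not from the lwIP sources): allocate a PBUF_RAM pbuf with spare room for transport, IP and link headers, fill its contiguous payload, and drop the reference again. The 64-byte payload size is arbitrary.

struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 64, PBUF_RAM);
if (p != NULL) {
  memset(p->payload, 0, p->len);   /* PBUF_RAM payload is contiguous */
  /* ... hand p to e.g. udp_send() here ... */
  pbuf_free(p);                    /* release our reference when done */
}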

◆ pbuf_realloc()

void pbuf_realloc (struct pbuf *p, u16_t new_len)

Shrink a pbuf chain to a desired length.

Parameters
p        pbuf to shrink
new_len  desired new length of pbuf chain

Depending on the desired length, the first few pbufs in a chain might be skipped and left unchanged. The new last pbuf in the chain will be resized, and any remaining pbufs will be freed.

Note
If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
May not be called on a packet queue.
Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
{
  struct pbuf *q;
  u16_t rem_len; /* remaining length */
  s32_t grow;

  LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
  LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
              p->type == PBUF_ROM ||
              p->type == PBUF_RAM ||
              p->type == PBUF_REF);

  /* desired length larger than current length? */
  if (new_len >= p->tot_len) {
    /* enlarging not yet supported */
    return;
  }

  /* the pbuf chain grows by (new_len - p->tot_len) bytes
   * (which may be negative in case of shrinking) */
  grow = new_len - p->tot_len;

  /* first, step over any pbufs that should remain in the chain */
  rem_len = new_len;
  q = p;
  /* should this pbuf be kept? */
  while (rem_len > q->len) {
    /* decrease remaining length by pbuf length */
    rem_len -= q->len;
    /* decrease total length indicator */
    LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
    q->tot_len += (u16_t)grow;
    /* proceed to next pbuf in chain */
    q = q->next;
    LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
  }
  /* we have now reached the new last pbuf (in q) */
  /* rem_len == desired length for pbuf q */

  /* shrink allocated memory for PBUF_RAM */
  /* (other types merely adjust their length fields */
  if ((q->type == PBUF_RAM) && (rem_len != q->len)
#if LWIP_SUPPORT_CUSTOM_PBUF
      && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
     ) {
    /* reallocate and adjust the length of the pbuf that will be split */
    q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len);
    LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
  }
  /* adjust length fields for new last pbuf */
  q->len = rem_len;
  q->tot_len = q->len;

  /* any remaining pbufs in chain? */
  if (q->next != NULL) {
    /* free remaining pbufs in chain */
    pbuf_free(q->next);
  }
  /* q is last packet in chain */
  q->next = NULL;
}
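
A usage sketch (not from the lwIP sources), assuming p is a previously allocated pbuf (chain) and actual_len is the number of valid bytes in it:

if (actual_len < p->tot_len) {
  pbuf_realloc(p, actual_len);   /* shrink only; pbuf_realloc cannot grow a pbuf */
}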

◆ pbuf_free()

u8_t pbuf_free (struct pbuf *p)

Dereference a pbuf chain or queue and deallocate any no-longer-used pbufs at the head of this chain or queue.

Decrements the pbuf reference count. If it reaches zero, the pbuf is deallocated.

For a pbuf chain, this is repeated for each pbuf in the chain, up to the first pbuf which has a non-zero reference count after decrementing. So, when all reference counts are one, the whole chain is free'd.

Parameters
p  The pbuf (chain) to be dereferenced.
Returns
the number of pbufs that were de-allocated from the head of the chain.
Note
MUST NOT be called on a packet queue (Not verified to work yet).
the reference counter of a pbuf equals the number of pointers that refer to the pbuf (or into the pbuf).
{
  u16_t type;
  struct pbuf *q;
  u8_t count;

  if (p == NULL) {
    LWIP_ASSERT("p != NULL", p != NULL);
    /* if assertions are disabled, proceed with debug output */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
                ("pbuf_free(p == NULL) was called.\n"));
    return 0;
  }
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));

  PERF_START;

  LWIP_ASSERT("pbuf_free: sane type",
              p->type == PBUF_RAM || p->type == PBUF_ROM ||
              p->type == PBUF_REF || p->type == PBUF_POOL);

  count = 0;
  /* de-allocate all consecutive pbufs from the head of the chain that
   * obtain a zero reference count after decrementing*/
  while (p != NULL) {
    u16_t ref;
    SYS_ARCH_DECL_PROTECT(old_level);
    /* Since decrementing ref cannot be guaranteed to be a single machine operation
     * we must protect it. We put the new ref into a local variable to prevent
     * further protection. */
    SYS_ARCH_PROTECT(old_level);
    /* all pbufs in a chain are referenced at least once */
    LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
    /* decrease reference count (number of pointers to pbuf) */
    ref = --(p->ref);
    SYS_ARCH_UNPROTECT(old_level);
    /* this pbuf is no longer referenced to? */
    if (ref == 0) {
      /* remember next pbuf in chain for next iteration */
      q = p->next;
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
      type = p->type;
#if LWIP_SUPPORT_CUSTOM_PBUF
      /* is this a custom pbuf? */
      if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
        struct pbuf_custom *pc = (struct pbuf_custom*)p;
        LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
        pc->custom_free_function(p);
      } else
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
      {
        /* is this a pbuf from the pool? */
        if (type == PBUF_POOL) {
          memp_free(MEMP_PBUF_POOL, p);
        /* is this a ROM or RAM referencing pbuf? */
        } else if (type == PBUF_ROM || type == PBUF_REF) {
          memp_free(MEMP_PBUF, p);
        /* type == PBUF_RAM */
        } else {
          mem_free(p);
        }
      }
      count++;
      /* proceed to next pbuf */
      p = q;
    /* p->ref > 0, this pbuf is still referenced to */
    /* (and so the remaining pbufs in chain as well) */
    } else {
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
      /* stop walking through the chain */
      p = NULL;
    }
  }
  PERF_STOP("pbuf_free");
  /* return number of de-allocated pbufs */
  return count;
}
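
A reference-counting sketch (not from the lwIP sources) showing why pbuf_free() returns the number of deallocated pbufs:

struct pbuf *p = pbuf_alloc(PBUF_RAW, 128, PBUF_RAM);
if (p != NULL) {
  pbuf_ref(p);                 /* ref: 1 -> 2 */
  u8_t n = pbuf_free(p);       /* ref: 2 -> 1, nothing deallocated, n == 0 */
  n = pbuf_free(p);            /* ref: 1 -> 0, pbuf deallocated,    n == 1 */
  (void)n;
}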

◆ pbuf_ref()

void pbuf_ref (struct pbuf *p)

Increment the reference count of the pbuf.

Parameters
p  pbuf to increase reference counter of
{
  /* pbuf given? */
  if (p != NULL) {
    SYS_ARCH_INC(p->ref, 1);
    LWIP_ASSERT("pbuf ref overflow", p->ref > 0);
  }
}

◆ pbuf_cat()

void pbuf_cat (struct pbuf *h, struct pbuf *t)

Concatenate two pbufs (each may be a pbuf chain) and take over the caller's reference of the tail pbuf.

Note
The caller MAY NOT reference the tail pbuf afterwards. Use pbuf_chain() for that purpose.
See also
pbuf_chain()
{
  struct pbuf *p;

  LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
             ((h != NULL) && (t != NULL)), return;);

  /* proceed to last pbuf of chain */
  for (p = h; p->next != NULL; p = p->next) {
    /* add total length of second chain to all totals of first chain */
    p->tot_len += t->tot_len;
  }
  /* { p is last pbuf of first h chain, p->next == NULL } */
  LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
  LWIP_ASSERT("p->next == NULL", p->next == NULL);
  /* add total length of second chain to last pbuf total of first chain */
  p->tot_len += t->tot_len;
  /* chain last pbuf of head (p) with first of tail (t) */
  p->next = t;
  /* p->next now references t, but the caller will drop its reference to t,
   * so netto there is no change to the reference count of t.
   */
}

◆ pbuf_chain()

void pbuf_chain (struct pbuf *h, struct pbuf *t)

Chain two pbufs (or pbuf chains) together.

The caller MUST call pbuf_free(t) once it has stopped using it. Use pbuf_cat() instead if you no longer use t.

Parameters
h  head pbuf (chain)
t  tail pbuf (chain)
Note
The pbufs MUST belong to the same packet.
MAY NOT be called on a packet queue.

The ->tot_len fields of all pbufs of the head chain are adjusted. The ->next field of the last pbuf of the head chain is adjusted. The ->ref field of the first pbuf of the tail chain is adjusted.

{
  pbuf_cat(h, t);
  /* t is now referenced by h */
  pbuf_ref(t);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
}
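
A sketch (not from the lwIP sources) contrasting the two calls, assuming h, t1 and t2 are previously allocated pbufs belonging to the same packet:

pbuf_cat(h, t1);     /* h takes over the caller's reference to t1 */
/* t1 must not be used or freed by the caller anymore */

pbuf_chain(h, t2);   /* t2 gets an extra reference */
pbuf_free(t2);       /* caller still has to drop its own reference to t2 */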

◆ pbuf_copy()

err_t pbuf_copy (struct pbuf *p_to, const struct pbuf *p_from)

Create PBUF_RAM copies of pbufs.

Used to queue packets on behalf of the lwIP stack, such as ARP based queueing.

Note
You MUST explicitly use p = pbuf_take(p);
Only one packet is copied, no packet queue!
Parameters
p_to    pbuf destination of the copy
p_from  pbuf source of the copy
Returns
ERR_OK if pbuf was copied, ERR_ARG if one of the pbufs is NULL or p_to is not big enough to hold p_from
{
  u16_t offset_to=0, offset_from=0, len;

  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
              (const void*)p_to, (const void*)p_from));

  /* is the target big enough to hold the source? */
  LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
             (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);

  /* iterate through pbuf chain */
  do
  {
    /* copy one part of the original chain */
    if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
      /* complete current p_from fits into current p_to */
      len = p_from->len - offset_from;
    } else {
      /* current p_from does not fit into current p_to */
      len = p_to->len - offset_to;
    }
    MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len);
    offset_to += len;
    offset_from += len;
    LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
    LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
    if (offset_from >= p_from->len) {
      /* on to next p_from (if any) */
      offset_from = 0;
      p_from = p_from->next;
    }
    if (offset_to == p_to->len) {
      /* on to next p_to (if any) */
      offset_to = 0;
      p_to = p_to->next;
      LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;);
    }

    if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!",
                 (p_from->next == NULL), return ERR_VAL;);
    }
    if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!",
                 (p_to->next == NULL), return ERR_VAL;);
    }
  } while (p_from);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
  return ERR_OK;
}
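
A usage sketch (not from the lwIP sources): create a private PBUF_RAM copy of an incoming chain p before queueing it, similar to what ARP-style queueing does:

struct pbuf *copy = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
if (copy != NULL) {
  if (pbuf_copy(copy, p) != ERR_OK) {
    pbuf_free(copy);
    copy = NULL;
  }
}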

◆ pbuf_copy_partial()

u16_t pbuf_copy_partial (const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)

Copy (part of) the contents of a packet buffer to an application supplied buffer.

Parameters
buf      the pbuf from which to copy data
dataptr  the application supplied buffer
len      length of data to copy (dataptr must be big enough). No more than buf->tot_len will be copied, irrespective of len
offset   offset into the packet buffer from where to begin copying len bytes
Returns
the number of bytes copied, or 0 on failure
{
  const struct pbuf *p;
  u16_t left;
  u16_t buf_copy_len;
  u16_t copied_total = 0;

  LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
  LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);

  left = 0;

  if ((buf == NULL) || (dataptr == NULL)) {
    return 0;
  }

  /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
  for (p = buf; len != 0 && p != NULL; p = p->next) {
    if ((offset != 0) && (offset >= p->len)) {
      /* don't copy from this buffer -> on to the next */
      offset -= p->len;
    } else {
      /* copy from this buffer. maybe only partially. */
      buf_copy_len = p->len - offset;
      if (buf_copy_len > len) {
        buf_copy_len = len;
      }
      /* copy the necessary parts of the buffer */
      MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len);
      copied_total += buf_copy_len;
      left += buf_copy_len;
      len -= buf_copy_len;
      offset = 0;
    }
  }
  return copied_total;
}
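
A usage sketch (not from the lwIP sources): flatten part of a (possibly chained) pbuf p into a local array, skipping a hypothetical 14-byte header:

u8_t flat[128];
u16_t copied = pbuf_copy_partial(p, flat, sizeof(flat), 14);
if (copied == 0) {
  /* nothing copied: p was NULL or the offset lies past the end of the packet */
}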

◆ pbuf_skip()

struct pbuf* pbuf_skip (struct pbuf *in, u16_t in_offset, u16_t *out_offset)

Skip a number of bytes at the start of a pbuf

Parameters
in          input pbuf
in_offset   offset to skip
out_offset  resulting offset in the returned pbuf
Returns
the pbuf in the queue where the offset is
{
  const struct pbuf* out = pbuf_skip_const(in, in_offset, out_offset);
  return LWIP_CONST_CAST(struct pbuf*, out);
}

◆ pbuf_take()

err_t pbuf_take (struct pbuf *buf, const void *dataptr, u16_t len)

Copy application supplied data into a pbuf. This function can only be used to copy the equivalent of buf->tot_len data.

Parameters
buf      pbuf to fill with data
dataptr  application supplied data buffer
len      length of the application supplied data buffer
Returns
ERR_OK if successful, ERR_MEM if the pbuf is not big enough
{
  struct pbuf *p;
  u16_t buf_copy_len;
  u16_t total_copy_len = len;
  u16_t copied_total = 0;

  LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
  LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
  LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);

  if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
    return ERR_ARG;
  }

  /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
  for (p = buf; total_copy_len != 0; p = p->next) {
    LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
    buf_copy_len = total_copy_len;
    if (buf_copy_len > p->len) {
      /* this pbuf cannot hold all remaining data */
      buf_copy_len = p->len;
    }
    /* copy the necessary parts of the buffer */
    MEMCPY(p->payload, &((const char*)dataptr)[copied_total], buf_copy_len);
    total_copy_len -= buf_copy_len;
    copied_total += buf_copy_len;
  }
  LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
  return ERR_OK;
}
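
A usage sketch (not from the lwIP sources): allocate an outgoing pbuf and fill it from a flat application buffer in one call:

static const char msg[] = "hello";
struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg), PBUF_RAM);
if (p != NULL) {
  if (pbuf_take(p, msg, sizeof(msg)) == ERR_OK) {
    /* ... pass p to e.g. udp_send() ... */
  }
  pbuf_free(p);
}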

◆ pbuf_take_at()

err_t pbuf_take_at (struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)

Same as pbuf_take() but puts data at an offset

Parameters
buf      pbuf to fill with data
dataptr  application supplied data buffer
len      length of the application supplied data buffer
offset   offset in pbuf where to copy dataptr to
Returns
ERR_OK if successful, ERR_MEM if the pbuf is not big enough
{
  u16_t target_offset;
  struct pbuf* q = pbuf_skip(buf, offset, &target_offset);

  /* return requested data if pbuf is OK */
  if ((q != NULL) && (q->tot_len >= target_offset + len)) {
    u16_t remaining_len = len;
    const u8_t* src_ptr = (const u8_t*)dataptr;
    /* copy the part that goes into the first pbuf */
    u16_t first_copy_len = LWIP_MIN(q->len - target_offset, len);
    MEMCPY(((u8_t*)q->payload) + target_offset, dataptr, first_copy_len);
    remaining_len -= first_copy_len;
    src_ptr += first_copy_len;
    if (remaining_len > 0) {
      return pbuf_take(q->next, src_ptr, remaining_len);
    }
    return ERR_OK;
  }
  return ERR_MEM;
}

◆ pbuf_coalesce()

struct pbuf* pbuf_coalesce (struct pbuf *p, pbuf_layer layer)

Creates a single pbuf out of a queue of pbufs.

Remarks
Either the source pbuf 'p' is freed by this function or the original pbuf 'p' is returned; therefore, the caller has to check the result!
Parameters
p      the source pbuf
layer  pbuf_layer of the new pbuf
Returns
a new, single pbuf (p->next is NULL) or the old pbuf if allocation fails
{
  struct pbuf *q;
  err_t err;
  if (p->next == NULL) {
    return p;
  }
  q = pbuf_alloc(layer, p->tot_len, PBUF_RAM);
  if (q == NULL) {
    /* @todo: what do we do now? */
    return p;
  }
  err = pbuf_copy(q, p);
  LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */
  LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
  pbuf_free(p);
  return q;
}
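
A usage sketch (not from the lwIP sources); the assignment back to p matters because the old chain may already have been freed:

p = pbuf_coalesce(p, PBUF_RAW);
if (p->next == NULL) {
  /* success: the payload of p is now contiguous */
} else {
  /* allocation failed: p is still the original, chained pbuf */
}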

◆ pbuf_get_at()

u8_t pbuf_get_at (const struct pbuf *p, u16_t offset)

Get one byte from the specified position in a pbuf. WARNING: returns zero for offset >= p->tot_len.

Parameters
p       pbuf to parse
offset  offset into p of the byte to return
Returns
byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
{
  int ret = pbuf_try_get_at(p, offset);
  if (ret >= 0) {
    return (u8_t)ret;
  }
  return 0;
}

◆ pbuf_try_get_at()

int pbuf_try_get_at (const struct pbuf *p, u16_t offset)

Get one byte from the specified position in a pbuf

Parameters
p       pbuf to parse
offset  offset into p of the byte to return
Returns
byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
{
  u16_t q_idx;
  const struct pbuf* q = pbuf_skip_const(p, offset, &q_idx);

  /* return requested data if pbuf is OK */
  if ((q != NULL) && (q->len > q_idx)) {
    return ((u8_t*)q->payload)[q_idx];
  }
  return -1;
}
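
A usage sketch (not from the lwIP sources); prefer this function over pbuf_get_at() when the offset might be out of range, because pbuf_get_at() silently returns 0 in that case:

int b = pbuf_try_get_at(p, offset);
if (b >= 0) {
  u8_t value = (u8_t)b;   /* valid byte read, possibly from a later pbuf in the chain */
  (void)value;
} else {
  /* offset >= p->tot_len */
}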

◆ pbuf_put_at()

void pbuf_put_at (struct pbuf *p, u16_t offset, u8_t data)

Put one byte at the specified position in a pbuf. WARNING: silently ignores offset >= p->tot_len.

Parameters
p       pbuf to fill
offset  offset into p of the byte to write
data    byte to write at an offset into p
{
  u16_t q_idx;
  struct pbuf* q = pbuf_skip(p, offset, &q_idx);

  /* write requested data if pbuf is OK */
  if ((q != NULL) && (q->len > q_idx)) {
    ((u8_t*)q->payload)[q_idx] = data;
  }
}

◆ pbuf_memcmp()

u16_t pbuf_memcmp (const struct pbuf *p, u16_t offset, const void *s2, u16_t n)

Compare pbuf contents at specified offset with memory s2, both of length n

Parameters
p       pbuf to compare
offset  offset into p at which to start comparing
s2      buffer to compare
n       length of buffer to compare
Returns
zero if equal, nonzero otherwise (0xffff if p is too short, diffoffset+1 otherwise)
{
  u16_t start = offset;
  const struct pbuf* q = p;
  u16_t i;

  /* pbuf long enough to perform check? */
  if(p->tot_len < (offset + n)) {
    return 0xffff;
  }

  /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */
  while ((q != NULL) && (q->len <= start)) {
    start -= q->len;
    q = q->next;
  }

  /* return requested data if pbuf is OK */
  for (i = 0; i < n; i++) {
    /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
    u8_t a = pbuf_get_at(q, start + i);
    u8_t b = ((const u8_t*)s2)[i];
    if (a != b) {
      return i+1;
    }
  }
  return 0;
}

◆ pbuf_memfind()

u16_t pbuf_memfind (const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset)

Find occurrence of mem (with length mem_len) in pbuf p, starting at offset start_offset.

Parameters
p             pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as return value 'not found'
mem           search for the contents of this buffer
mem_len       length of 'mem'
start_offset  offset into p at which to start searching
Returns
0xFFFF if substr was not found in p or the index where it was found
{
  u16_t i;
  u16_t max = p->tot_len - mem_len;
  if (p->tot_len >= mem_len + start_offset) {
    for (i = start_offset; i <= max; i++) {
      u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
      if (plus == 0) {
        return i;
      }
    }
  }
  return 0xFFFF;
}
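
A usage sketch (not from the lwIP sources): locate the end of HTTP-style headers in a pbuf chain p; 0xFFFF means 'not found':

static const char crlf2[] = "\r\n\r\n";
u16_t pos = pbuf_memfind(p, crlf2, 4, 0);
if (pos != 0xFFFF) {
  /* the header block ends at offset pos */
}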