DPDK  make-f/builddir/build/BUILD/dpdk-stable-16.11.2/mk/rte.sdkconfig.mkshowversion
rte_mbuf.h
Go to the documentation of this file.
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  * Copyright 2014 6WIND S.A.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * * Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  * * Redistributions in binary form must reproduce the above copyright
15  * notice, this list of conditions and the following disclaimer in
16  * the documentation and/or other materials provided with the
17  * distribution.
18  * * Neither the name of Intel Corporation nor the names of its
19  * contributors may be used to endorse or promote products derived
20  * from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #ifndef _RTE_MBUF_H_
36 #define _RTE_MBUF_H_
37 
56 #include <stdint.h>
57 #include <rte_common.h>
58 #include <rte_mempool.h>
59 #include <rte_memory.h>
60 #include <rte_atomic.h>
61 #include <rte_prefetch.h>
62 #include <rte_branch_prediction.h>
63 #include <rte_mbuf_ptype.h>
64 
65 #ifdef __cplusplus
66 extern "C" {
67 #endif
68 
69 /*
70  * Packet Offload Features Flags. It also carry packet type information.
71  * Critical resources. Both rx/tx shared these bits. Be cautious on any change
72  *
73  * - RX flags start at bit position zero, and get added to the left of previous
74  * flags.
75  * - The most-significant 3 bits are reserved for generic mbuf flags
76  * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
77  * added to the right of the previously defined flags i.e. they should count
78  * downwards, not upwards.
79  *
80  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
81  * rte_get_tx_ol_flag_name().
82  */
83 
90 #define PKT_RX_VLAN_PKT (1ULL << 0)
91 
92 #define PKT_RX_RSS_HASH (1ULL << 1)
93 #define PKT_RX_FDIR (1ULL << 2)
102 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
103 
111 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
112 
113 #define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
120 #define PKT_RX_VLAN_STRIPPED (1ULL << 6)
121 
130 #define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
131 
132 #define PKT_RX_IP_CKSUM_UNKNOWN 0
133 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
134 #define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
135 #define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
136 
145 #define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
146 
147 #define PKT_RX_L4_CKSUM_UNKNOWN 0
148 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
149 #define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
150 #define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
151 
152 #define PKT_RX_IEEE1588_PTP (1ULL << 9)
153 #define PKT_RX_IEEE1588_TMST (1ULL << 10)
154 #define PKT_RX_FDIR_ID (1ULL << 13)
155 #define PKT_RX_FDIR_FLX (1ULL << 14)
164 #define PKT_RX_QINQ_STRIPPED (1ULL << 15)
165 
171 #define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
172 
178 #define PKT_RX_LRO (1ULL << 16)
179 
180 /* add new RX flags here */
181 
182 /* add new TX flags here */
183 
189 #define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
190 #define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
191 #define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
192 #define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
193 /* add new TX TUNNEL type here */
194 #define PKT_TX_TUNNEL_MASK (0xFULL << 45)
195 
199 #define PKT_TX_QINQ_PKT (1ULL << 49)
214 #define PKT_TX_TCP_SEG (1ULL << 50)
215 
216 #define PKT_TX_IEEE1588_TMST (1ULL << 51)
229 #define PKT_TX_L4_NO_CKSUM (0ULL << 52)
230 #define PKT_TX_TCP_CKSUM (1ULL << 52)
231 #define PKT_TX_SCTP_CKSUM (2ULL << 52)
232 #define PKT_TX_UDP_CKSUM (3ULL << 52)
233 #define PKT_TX_L4_MASK (3ULL << 52)
242 #define PKT_TX_IP_CKSUM (1ULL << 54)
243 
250 #define PKT_TX_IPV4 (1ULL << 55)
251 
258 #define PKT_TX_IPV6 (1ULL << 56)
259 
260 #define PKT_TX_VLAN_PKT (1ULL << 57)
270 #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
271 
277 #define PKT_TX_OUTER_IPV4 (1ULL << 59)
278 
284 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
285 
286 #define __RESERVED (1ULL << 61)
288 #define IND_ATTACHED_MBUF (1ULL << 62)
290 /* Use final bit of flags to indicate a control mbuf */
291 #define CTRL_MBUF_FLAG (1ULL << 63)
294 #define RTE_MBUF_PRIV_ALIGN 8
295 
304 const char *rte_get_rx_ol_flag_name(uint64_t mask);
305 
318 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
319 
330 const char *rte_get_tx_ol_flag_name(uint64_t mask);
331 
344 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
345 
352 #define RTE_MBUF_DEFAULT_DATAROOM 2048
353 #define RTE_MBUF_DEFAULT_BUF_SIZE \
354  (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
355 
356 /* define a set of marker types that can be used to refer to set points in the
357  * mbuf */
358 __extension__
359 typedef void *MARKER[0];
360 __extension__
361 typedef uint8_t MARKER8[0];
362 __extension__
363 typedef uint64_t MARKER64[0];
369 struct rte_mbuf {
370  MARKER cacheline0;
371 
372  void *buf_addr;
375  uint16_t buf_len;
377  /* next 6 bytes are initialised on RX descriptor rearm */
378  MARKER8 rearm_data;
379  uint16_t data_off;
380 
390  union {
392  uint16_t refcnt;
393  };
394  uint8_t nb_segs;
395  uint8_t port;
397  uint64_t ol_flags;
399  /* remaining bytes are set on RX when pulling packet from descriptor */
400  MARKER rx_descriptor_fields1;
401 
402  /*
403  * The packet type, which is the combination of outer/inner L2, L3, L4
404  * and tunnel types. The packet_type is about data really present in the
405  * mbuf. Example: if vlan stripping is enabled, a received vlan packet
406  * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
407  * vlan is stripped from the data.
408  */
410  union {
411  uint32_t packet_type;
412  struct {
413  uint32_t l2_type:4;
414  uint32_t l3_type:4;
415  uint32_t l4_type:4;
416  uint32_t tun_type:4;
417  uint32_t inner_l2_type:4;
418  uint32_t inner_l3_type:4;
419  uint32_t inner_l4_type:4;
420  };
421  };
422 
423  uint32_t pkt_len;
424  uint16_t data_len;
426  uint16_t vlan_tci;
427 
428  union {
429  uint32_t rss;
430  struct {
432  union {
433  struct {
434  uint16_t hash;
435  uint16_t id;
436  };
437  uint32_t lo;
439  };
440  uint32_t hi;
443  } fdir;
444  struct {
445  uint32_t lo;
446  uint32_t hi;
447  } sched;
448  uint32_t usr;
449  } hash;
451  uint32_t seqn;
454  uint16_t vlan_tci_outer;
455 
456  /* second cache line - fields only used in slow path or on TX */
457  MARKER cacheline1 __rte_cache_min_aligned;
458 
460  union {
461  void *userdata;
462  uint64_t udata64;
463  };
464 
465  struct rte_mempool *pool;
466  struct rte_mbuf *next;
468  /* fields to support TX offloads */
470  union {
471  uint64_t tx_offload;
472  __extension__
473  struct {
474  uint64_t l2_len:7;
478  uint64_t l3_len:9;
479  uint64_t l4_len:8;
480  uint64_t tso_segsz:16;
482  /* fields for TX offloading of tunnels */
483  uint64_t outer_l3_len:9;
484  uint64_t outer_l2_len:7;
486  /* uint64_t unused:8; */
487  };
488  };
489 
492  uint16_t priv_size;
493 
495  uint16_t timesync;
497 
508 static inline void
510 {
511  rte_prefetch0(&m->cacheline0);
512 }
513 
/**
 * Prefetch the second cache line of the mbuf — only useful when the two
 * halves live in distinct 64-byte lines; a no-op on 128-byte-line targets.
 * NOTE(review): name line dropped by the extraction; restored from the
 * cross-reference index (definition at upstream line 526).
 */
static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}
534 
535 
536 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
537 
546 static inline phys_addr_t
548 {
549  return mb->buf_physaddr + mb->data_off;
550 }
551 
564 static inline phys_addr_t
566 {
567  return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
568 }
569 
578 static inline struct rte_mbuf *
580 {
581  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
582 }
583 
592 static inline char *
594 {
595  char *buffer_addr;
596  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
597  return buffer_addr;
598 }
599 
603 #define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
604 
608 #define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
609 
/**
 * Private data attached to a pktmbuf mempool.
 * NOTE(review): the struct's opening lines were dropped by the extraction;
 * the tag and the mbuf_data_room_size member are restored from their uses
 * in rte_pktmbuf_data_room_size()/rte_pktmbuf_priv_size() below and from
 * the cross-reference index (definition at upstream line 617).
 */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size;	/* size of data space in each mbuf */
	uint16_t mbuf_priv_size;	/* size of private area in each mbuf */
};
620 
621 #ifdef RTE_LIBRTE_MBUF_DEBUG
622 
624 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
625 
626 #else /* RTE_LIBRTE_MBUF_DEBUG */
627 
629 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
630 
631 #endif /* RTE_LIBRTE_MBUF_DEBUG */
632 
633 #ifdef RTE_MBUF_REFCNT_ATOMIC
634 
/**
 * Read the mbuf reference counter (atomic variant, RTE_MBUF_REFCNT_ATOMIC).
 *
 * @param m  mbuf whose counter is read
 * @return   current reference count
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}
647 
/**
 * Set the mbuf reference counter to new_value (atomic variant).
 *
 * @param m          mbuf to update
 * @param new_value  value stored into the counter
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, new_value);
}
660 
/**
 * Add `value` to the mbuf reference counter and return the new count
 * (atomic variant). Fast path: a caller that observes refcnt == 1 is the
 * sole owner, so a plain store is sufficient and the atomic RMW is skipped.
 *
 * @param m      mbuf to update
 * @param value  signed delta to apply
 * @return       the updated reference count
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the uniq holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		rte_mbuf_refcnt_set(m, 1 + value);
		return 1 + value;
	}

	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}
687 
688 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
689 
693 static inline uint16_t
694 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
695 {
696  m->refcnt = (uint16_t)(m->refcnt + value);
697  return m->refcnt;
698 }
699 
703 static inline uint16_t
705 {
706  return m->refcnt;
707 }
708 
/**
 * Set the mbuf reference counter to new_value (non-atomic variant).
 *
 * @param m          mbuf to update
 * @param new_value  value stored into the counter
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}
717 
718 #endif /* RTE_MBUF_REFCNT_ATOMIC */
719 
721 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
722  if ((m) != NULL) \
723  rte_prefetch0(m); \
724 } while (0)
725 
726 
739 void
740 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
741 
756 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
757 {
758  struct rte_mbuf *m;
759  void *mb = NULL;
760 
761  if (rte_mempool_get(mp, &mb) < 0)
762  return NULL;
763  m = (struct rte_mbuf *)mb;
764  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
765  rte_mbuf_refcnt_set(m, 1);
767 
768  return m;
769 }
770 
/**
 * Return an mbuf to its mempool without touching its fields.
 * The reference counter must already be 0 (internal helper).
 *
 * @param m  mbuf to put back into m->pool
 */
static inline void __attribute__((always_inline))
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
	rte_mempool_put(m->pool, m);
}
785 
786 /* Operations on ctrl mbuf */
787 
807 void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
808  void *m, unsigned i);
809 
822 #define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
823 
830 #define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)
831 
840 #define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
841 
850 #define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)
851 
861 static inline int
863 {
864  return !!(m->ol_flags & CTRL_MBUF_FLAG);
865 }
866 
867 /* Operations on pkt mbuf */
868 
888 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
889  void *m, unsigned i);
890 
891 
908 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
909 
945 struct rte_mempool *
946 rte_pktmbuf_pool_create(const char *name, unsigned n,
947  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
948  int socket_id);
949 
961 static inline uint16_t
963 {
964  struct rte_pktmbuf_pool_private *mbp_priv;
965 
966  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
967  return mbp_priv->mbuf_data_room_size;
968 }
969 
982 static inline uint16_t
984 {
985  struct rte_pktmbuf_pool_private *mbp_priv;
986 
987  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
988  return mbp_priv->mbuf_priv_size;
989 }
990 
/**
 * Rewind data_off to the configured headroom, clamped to the buffer length
 * for pools whose buffers are smaller than RTE_PKTMBUF_HEADROOM.
 *
 * @param m  mbuf whose headroom is reset
 */
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
}
1003 
1012 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1013 {
1014  m->next = NULL;
1015  m->pkt_len = 0;
1016  m->tx_offload = 0;
1017  m->vlan_tci = 0;
1018  m->vlan_tci_outer = 0;
1019  m->nb_segs = 1;
1020  m->port = 0xff;
1021 
1022  m->ol_flags = 0;
1023  m->packet_type = 0;
1025 
1026  m->data_len = 0;
1028 }
1029 
/**
 * Allocate a packet mbuf from the pool: raw allocation followed by a full
 * field reset, so the result is ready to receive packet data.
 *
 * @param mp  mempool to allocate from
 * @return    an initialized mbuf, or NULL if the pool is empty
 */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m != NULL)
		rte_pktmbuf_reset(m);
	return m;
}
1050 
/**
 * Allocate and initialize `count` packet mbufs in one mempool operation.
 * On failure nothing is allocated; on success every mbuf in `mbufs` has
 * refcnt 1 and reset fields.
 *
 * @param pool   mempool to allocate from
 * @param mbufs  output array of `count` mbuf pointers
 * @param count  number of mbufs requested
 * @return       0 on success, the (non-zero) rte_mempool_get_bulk error
 *               code otherwise
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here while() loop is used rather than do() while{} to avoid extra
	 * check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
			rte_mbuf_refcnt_set(mbufs[idx], 1);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* deliberate fall-through: the case labels below sit
			 * inside the while body, unrolling it 4x per pass */
	case 3:
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
			rte_mbuf_refcnt_set(mbufs[idx], 1);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
			rte_mbuf_refcnt_set(mbufs[idx], 1);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
			rte_mbuf_refcnt_set(mbufs[idx], 1);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
		}
	}
	return 0;
}
1105 
1123 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1124 {
1125  struct rte_mbuf *md;
1126 
1127  RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1128  rte_mbuf_refcnt_read(mi) == 1);
1129 
1130  /* if m is not direct, get the mbuf that embeds the data */
1131  if (RTE_MBUF_DIRECT(m))
1132  md = m;
1133  else
1134  md = rte_mbuf_from_indirect(m);
1135 
1136  rte_mbuf_refcnt_update(md, 1);
1137  mi->priv_size = m->priv_size;
1138  mi->buf_physaddr = m->buf_physaddr;
1139  mi->buf_addr = m->buf_addr;
1140  mi->buf_len = m->buf_len;
1141 
1142  mi->next = m->next;
1143  mi->data_off = m->data_off;
1144  mi->data_len = m->data_len;
1145  mi->port = m->port;
1146  mi->vlan_tci = m->vlan_tci;
1147  mi->vlan_tci_outer = m->vlan_tci_outer;
1148  mi->tx_offload = m->tx_offload;
1149  mi->hash = m->hash;
1150 
1151  mi->next = NULL;
1152  mi->pkt_len = mi->data_len;
1153  mi->nb_segs = 1;
1154  mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1155  mi->packet_type = m->packet_type;
1156 
1157  __rte_mbuf_sanity_check(mi, 1);
1159 }
1160 
1174 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1175 {
1176  struct rte_mbuf *md = rte_mbuf_from_indirect(m);
1177  struct rte_mempool *mp = m->pool;
1178  uint32_t mbuf_size, buf_len, priv_size;
1179 
1180  priv_size = rte_pktmbuf_priv_size(mp);
1181  mbuf_size = sizeof(struct rte_mbuf) + priv_size;
1182  buf_len = rte_pktmbuf_data_room_size(mp);
1183 
1184  m->priv_size = priv_size;
1185  m->buf_addr = (char *)m + mbuf_size;
1186  m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
1187  m->buf_len = (uint16_t)buf_len;
1189  m->data_len = 0;
1190  m->ol_flags = 0;
1191 
1192  if (rte_mbuf_refcnt_update(md, -1) == 0)
1193  __rte_mbuf_raw_free(md);
1194 }
1195 
/**
 * Decrement the segment's reference counter; when it reaches zero, detach
 * the segment if it is indirect and return it so the caller can recycle
 * it. Returns NULL while other references remain.
 * NOTE(review): upstream calls __rte_mbuf_sanity_check(m, 0) first; that
 * line (1199) was dropped by the extraction.
 *
 * @param m  segment to release
 * @return   m when it may be freed/recycled, NULL otherwise
 */
static inline struct rte_mbuf* __attribute__((always_inline))
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {
		/* if this is an indirect mbuf, it is detached. */
		if (RTE_MBUF_INDIRECT(m))
			rte_pktmbuf_detach(m);
		return m;
	}
	return NULL;
}
1209 
1219 static inline void __attribute__((always_inline))
1221 {
1222  if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
1223  m->next = NULL;
1224  __rte_mbuf_raw_free(m);
1225  }
1226 }
1227 
1237 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1238 {
1239  struct rte_mbuf *m_next;
1240 
1242 
1243  while (m != NULL) {
1244  m_next = m->next;
1246  m = m_next;
1247  }
1248 }
1249 
/**
 * Create a "clone" of packet `md`: a chain of indirect mbufs allocated
 * from `mp`, one attached to each segment of `md`. Returns NULL (and
 * frees the partially built chain) if any allocation fails.
 *
 * @param md  packet to clone (its segments gain one reference each)
 * @param mp  mempool the indirect mbufs are allocated from
 * @return    head of the cloned chain, or NULL on allocation failure
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
		struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint8_t nseg;

	if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;	/* where the next segment pointer is stored */
	pktlen = md->pkt_len;	/* saved: attach overwrites per-mbuf lengths */
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely (mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}
1303 
/**
 * Apply the signed refcnt delta `v` to every segment of the packet chain.
 * NOTE(review): upstream calls __rte_mbuf_sanity_check(m, 1) first; that
 * line (1317) was dropped by the extraction.
 *
 * @param m  head of the packet chain
 * @param v  signed delta applied to each segment's counter
 */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}
1323 
/**
 * Return the headroom: bytes between the buffer start and the packet data.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	return m->data_off;
}
1337 
/**
 * Return the tailroom: bytes left after the data in this segment's buffer.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
	    m->data_len);
}
1352 
/**
 * Walk the segment chain and return the last segment of the packet.
 * NOTE(review): upstream calls __rte_mbuf_sanity_check(m, 1) first; that
 * line (1365) was dropped by the extraction.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	struct rte_mbuf *m2 = (struct rte_mbuf *)m;

	while (m2->next != NULL)
		m2 = m2->next;
	return m2;
}
1370 
1385 #define rte_pktmbuf_mtod_offset(m, t, o) \
1386  ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
1387 
1400 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
1401 
1411 #define rte_pktmbuf_mtophys_offset(m, o) \
1412  (phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
1413 
1421 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
1422 
1431 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1432 
1441 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1442 
/**
 * Prepend `len` bytes to the packet by consuming headroom of the first
 * segment; pkt_len and data_len are grown accordingly.
 *
 * @param m    packet mbuf
 * @param len  bytes to prepend
 * @return     pointer to the new data start, or NULL if headroom < len
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
		uint16_t len)
{
	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	m->data_off -= len;
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}
1472 
/**
 * Append `len` bytes to the packet by consuming tailroom of the LAST
 * segment; pkt_len (head) and that segment's data_len are grown.
 *
 * @param m    packet mbuf (head of chain)
 * @param len  bytes to append
 * @return     pointer to the start of the appended region, or NULL if
 *             the last segment's tailroom < len
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len = (m->pkt_len + len);
	return (char*) tail;
}
1504 
/**
 * Remove `len` bytes from the FRONT of the packet (first segment only),
 * turning them into headroom.
 *
 * @param m    packet mbuf
 * @param len  bytes to strip; must not exceed the first segment's data_len
 * @return     pointer to the new data start, or NULL if len is too large
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	if (unlikely(len > m->data_len))
		return NULL;

	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off += len;
	m->pkt_len = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}
1531 
/**
 * Remove `len` bytes from the END of the packet (last segment only),
 * turning them into tailroom.
 *
 * @param m    packet mbuf (head of chain)
 * @param len  bytes to trim; must not exceed the last segment's data_len
 * @return     0 on success, -1 if len is too large
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len = (m->pkt_len - len);
	return 0;
}
1560 
1570 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
1571 {
1573  return !!(m->nb_segs == 1);
1574 }
1575 
1579 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
1580  uint32_t len, void *buf);
1581 
1602 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
1603  uint32_t off, uint32_t len, void *buf)
1604 {
1605  if (likely(off + len <= rte_pktmbuf_data_len(m)))
1606  return rte_pktmbuf_mtod_offset(m, char *, off);
1607  else
1608  return __rte_pktmbuf_read(m, off, len, buf);
1609 }
1610 
/**
 * Chain packet `tail` onto the end of packet `head`. The head keeps the
 * aggregate pkt_len/nb_segs; the former head fields of `tail` are
 * demoted to per-segment values.
 *
 * @param head  packet that absorbs the tail
 * @param tail  packet appended to head's chain
 * @return      0 on success, -EOVERFLOW when the combined segment count
 *              would not fit in the 8-bit nb_segs field
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8))
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length. */
	head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}
1648 
1663 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
1664 
1665 #ifdef __cplusplus
1666 }
1667 #endif
1668 
1669 #endif /* _RTE_MBUF_H_ */
static void rte_pktmbuf_reset(struct rte_mbuf *m)
Definition: rte_mbuf.h:1012
struct rte_mbuf * next
Definition: rte_mbuf.h:466
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:617
uint16_t vlan_tci_outer
Definition: rte_mbuf.h:454
static int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1495
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
Definition: rte_atomic.h:177
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1043
static void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1220
__extension__ typedef void * MARKER[0]
Definition: rte_mbuf.h:359
#define RTE_MBUF_DIRECT(mb)
Definition: rte_mbuf.h:608
#define IND_ATTACHED_MBUF
Definition: rte_mbuf.h:288
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:983
uint8_t port
Definition: rte_mbuf.h:395
#define likely(x)
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1237
static phys_addr_t rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
Definition: rte_mempool.h:1621
uint64_t l2_len
Definition: rte_mbuf.h:474
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
Definition: rte_mbuf.h:1267
void * buf_addr
Definition: rte_mbuf.h:372
uint32_t l2_type
Definition: rte_mbuf.h:413
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:579
uint16_t data_len
Definition: rte_mbuf.h:424
uint32_t lo
Definition: rte_mbuf.h:437
void * userdata
Definition: rte_mbuf.h:461
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:1627
struct rte_mbuf::@70::@77 fdir
uint8_t nb_segs
Definition: rte_mbuf.h:394
uint64_t tso_segsz
Definition: rte_mbuf.h:480
__extension__ typedef uint8_t MARKER8[0]
Definition: rte_mbuf.h:361
uint64_t l4_len
Definition: rte_mbuf.h:479
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1332
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:1064
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:999
uint32_t cache_size
Definition: rte_mempool.h:230
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:526
static int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
Definition: rte_mempool.h:1417
uint64_t outer_l3_len
Definition: rte_mbuf.h:483
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1570
struct rte_mempool * mp
uint64_t l3_len
Definition: rte_mbuf.h:478
uint32_t l4_type
Definition: rte_mbuf.h:415
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1346
#define unlikely(x)
uint16_t priv_size
Definition: rte_mbuf.h:492
uint16_t timesync
Definition: rte_mbuf.h:495
uint32_t hi
Definition: rte_mbuf.h:440
__extension__ typedef uint64_t MARKER64[0]
Definition: rte_mbuf.h:363
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define RTE_MIN(a, b)
Definition: rte_common.h:278
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:629
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:704
uint64_t outer_l2_len
Definition: rte_mbuf.h:484
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
Definition: rte_atomic.h:191
#define CTRL_MBUF_FLAG
Definition: rte_mbuf.h:291
uint16_t refcnt
Definition: rte_mbuf.h:392
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1519
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1123
uint32_t tun_type
Definition: rte_mbuf.h:416
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
Definition: rte_atomic.h:272
static int rte_is_ctrlmbuf(struct rte_mbuf *m)
Definition: rte_mbuf.h:862
uint64_t ol_flags
Definition: rte_mbuf.h:397
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1174
uint32_t pkt_len
Definition: rte_mbuf.h:423
uint16_t buf_len
Definition: rte_mbuf.h:375
uint32_t inner_l4_type
Definition: rte_mbuf.h:419
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1441
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:694
uint32_t packet_type
Definition: rte_mbuf.h:411
uint32_t seqn
Definition: rte_mbuf.h:451
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:962
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static phys_addr_t rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:547
phys_addr_t buf_physaddr
Definition: rte_mbuf.h:373
#define RTE_STD_C11
Definition: rte_common.h:64
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
Definition: rte_mbuf.h:465
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1488
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:713
struct rte_mbuf::@70::@78 sched
uint32_t inner_l3_type
Definition: rte_mbuf.h:418
uint32_t rss
Definition: rte_mbuf.h:429
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1546
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:593
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:1602
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1458
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:756
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1315
uint64_t phys_addr_t
Definition: rte_memory.h:103
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:115
#define __rte_cache_aligned
Definition: rte_memory.h:96
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1361
uint64_t udata64
Definition: rte_mbuf.h:462
uint32_t l3_type
Definition: rte_mbuf.h:414
uint32_t inner_l2_type
Definition: rte_mbuf.h:417
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:509
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:391
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1649
uint64_t tx_offload
Definition: rte_mbuf.h:471
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:219
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
static void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1223
uint16_t vlan_tci
Definition: rte_mbuf.h:426
#define RTE_MBUF_INDIRECT(mb)
Definition: rte_mbuf.h:603
#define RTE_SET_USED(x)
Definition: rte_common.h:103
#define rte_pktmbuf_mtod_offset(m, t, o)
Definition: rte_mbuf.h:1385
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static phys_addr_t rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:565
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
uint32_t usr
Definition: rte_mbuf.h:448