
Lines Matching defs:skb

51  *		skb->csum is undefined.
54 * skb->csum is undefined.
60 * the packet as seen by netif_rx in skb->csum.
62 * is able to produce some skb->csum, it MUST use HW,
67 * NONE: skb is checksummed by protocol or csum is not required.
70 * from skb->h.raw to the end and to record the checksum
71 * at skb->h.raw+skb->csum.
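
As a rough illustration of the CHECKSUM_* contract described above, a hypothetical NIC driver's receive path might report hardware checksum state like this. The function and parameter names are invented; only skb->csum, skb->ip_summed, CHECKSUM_HW/CHECKSUM_NONE and netif_rx() come from this era's API:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Illustrative sketch: hw_csum is assumed to be the unfolded 32-bit
 * sum the hardware computed over the packet, as CHECKSUM_HW on
 * receive requires. */
static void example_rx(struct sk_buff *skb, u32 hw_csum, int hw_ok)
{
	if (hw_ok) {
		skb->csum = hw_csum;		/* stack verifies this */
		skb->ip_summed = CHECKSUM_HW;
	} else {
		skb->ip_summed = CHECKSUM_NONE;	/* checksummed in software */
	}
	netif_rx(skb);
}
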
120 /* To allow a 64K frame to be packed as a single skb without frag_list */
132 * the end of the header data, i.e. at skb->end.
147 * to the payload part of skb->data. The lower 16 bits hold references to
148 * the entire skb->data. It is up to the users of the skb to agree on
151 * All users must obey the rule that the skb->data reference count must be
155 * care about modifications to the header part of skb->data.
176 /* This indicates the skb is from an untrusted source. */
221 * @nfctinfo: Relationship of this skb to the connection
227 * done by skb DMA functions
269 * first. This is owned by whoever has the skb queued ATM.
288 void (*destructor)(struct sk_buff *skb);
330 extern void kfree_skb(struct sk_buff *skb);
331 extern void __kfree_skb(struct sk_buff *skb);
349 extern void kfree_skbmem(struct sk_buff *skb);
350 extern struct sk_buff *skb_clone(struct sk_buff *skb,
352 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
354 extern struct sk_buff *pskb_copy(struct sk_buff *skb,
356 extern int pskb_expand_head(struct sk_buff *skb,
359 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
361 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
364 extern int skb_pad(struct sk_buff *skb, int pad);
366 extern void skb_over_panic(struct sk_buff *skb, int len,
368 extern void skb_under_panic(struct sk_buff *skb, int len,
370 extern void skb_truesize_bug(struct sk_buff *skb);
372 static inline void skb_truesize_check(struct sk_buff *skb)
374 if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
375 skb_truesize_bug(skb);
378 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
380 int len, int odd, struct sk_buff *skb),
394 extern void skb_prepare_seq_read(struct sk_buff *skb,
401 extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
406 #define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
421 * @skb: buffer to reference
426 static inline struct sk_buff *skb_get(struct sk_buff *skb)
428 atomic_inc(&skb->users);
429 return skb;
439 * @skb: buffer to check
445 static inline int skb_cloned(const struct sk_buff *skb)
447 return skb->cloned &&
448 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
453 * @skb: buffer to check
458 static inline int skb_header_cloned(const struct sk_buff *skb)
462 if (!skb->cloned)
465 dataref = atomic_read(&skb_shinfo(skb)->dataref);
472 * @skb: buffer to operate on
476 * part of skb->data after this.
478 static inline void skb_header_release(struct sk_buff *skb)
480 BUG_ON(skb->nohdr);
481 skb->nohdr = 1;
482 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
487 * @skb: buffer to check
492 static inline int skb_shared(const struct sk_buff *skb)
494 return atomic_read(&skb->users) != 1;
499 * @skb: buffer to check
510 static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
514 if (skb_shared(skb)) {
515 struct sk_buff *nskb = skb_clone(skb, pri);
516 kfree_skb(skb);
517 skb = nskb;
519 return skb;
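
skb_share_check() is typically the first call in a protocol receive handler that intends to modify the buffer. A minimal sketch; the handler name is invented, and the includes from the first sketch are assumed:

static int example_rcv(struct sk_buff *skb)
{
	/* If someone else also holds this skb, clone it and drop ours. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;	/* clone failed; original was freed */

	/* skb is now exclusively ours and its metadata may be changed. */
	return NET_RX_SUCCESS;
}
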
531 * @skb: buffer to check
542 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
546 if (skb_cloned(skb)) {
547 struct sk_buff *nskb = skb_copy(skb, pri);
548 kfree_skb(skb); /* Free our shared copy */
549 skb = nskb;
551 return skb;
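
Where skb_share_check() only guarantees an unshared struct sk_buff, skb_unshare() additionally guarantees private data. A sketch of taking a writable copy before touching the payload (the 4-byte memset target is purely illustrative):

static struct sk_buff *example_write(struct sk_buff *skb)
{
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;		/* copy failed; original already freed */

	memset(skb->data, 0, 4);	/* safe: the data is now private */
	return skb;
}
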
609 * this is needed for now since a whole lot of users of the skb-queue
750 extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
751 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
756 next = skb->next;
757 prev = skb->prev;
758 skb->next = skb->prev = NULL;
777 struct sk_buff *skb = skb_peek_tail(list);
778 if (skb)
779 __skb_unlink(skb, list);
780 return skb;
784 static inline int skb_is_nonlinear(const struct sk_buff *skb)
786 return skb->data_len;
789 static inline unsigned int skb_headlen(const struct sk_buff *skb)
791 return skb->len - skb->data_len;
794 static inline int skb_pagelen(const struct sk_buff *skb)
798 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
799 len += skb_shinfo(skb)->frags[i].size;
800 return len + skb_headlen(skb);
803 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
806 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
811 skb_shinfo(skb)->nr_frags = i + 1;
814 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
815 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
816 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
821 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
823 unsigned char *tmp = skb->tail;
824 SKB_LINEAR_ASSERT(skb);
825 skb->tail += len;
826 skb->len += len;
832 * @skb: buffer to use
839 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
841 unsigned char *tmp = skb->tail;
842 SKB_LINEAR_ASSERT(skb);
843 skb->tail += len;
844 skb->len += len;
845 if (unlikely(skb->tail > skb->end))
846 skb_over_panic(skb, len, current_text_addr());
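
A sketch of the canonical allocate/reserve/put sequence for building a frame; the sizes are arbitrary:

static struct sk_buff *build_example(void)
{
	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
	unsigned char *payload;

	if (!skb)
		return NULL;
	skb_reserve(skb, 16);		/* keep headroom for later headers */
	payload = skb_put(skb, 64);	/* extend the data area at the tail */
	memset(payload, 0, 64);		/* fill the 64 freshly added bytes */
	return skb;
}
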
850 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
852 skb->data -= len;
853 skb->len += len;
854 return skb->data;
859 * @skb: buffer to use
866 static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
868 skb->data -= len;
869 skb->len += len;
870 if (unlikely(skb->data < skb->head))
871 skb_under_panic(skb, len, current_text_addr());
872 return skb->data;
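
skb_push() is the mirror image of skb_put(): it claims headroom to place a header in front of the existing data. A sketch prepending a 4-byte illustrative tag (assumes sufficient, suitably aligned headroom, e.g. reserved as in the previous sketch):

static void push_example(struct sk_buff *skb, u32 tag)
{
	u32 *hdr = (u32 *)skb_push(skb, sizeof(*hdr));

	*hdr = tag;	/* the tag now precedes the old skb->data */
}
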
875 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
877 skb->len -= len;
878 BUG_ON(skb->len < skb->data_len);
879 return skb->data += len;
884 * @skb: buffer to use
892 static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
894 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
897 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
899 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
901 if (len > skb_headlen(skb) &&
902 !__pskb_pull_tail(skb, len-skb_headlen(skb)))
904 skb->len -= len;
905 return skb->data += len;
908 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
910 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
913 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
915 if (likely(len <= skb_headlen(skb)))
917 if (unlikely(len > skb->len))
919 return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
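
pskb_may_pull() is the standard guard before parsing a header that may not be entirely in the linear area. A sketch in the style of IPv4 input (illustrative; note that a successful pull may reallocate the head, so real code reloads its header pointer afterwards):

#include <linux/ip.h>

static int parse_example(struct sk_buff *skb)
{
	struct iphdr *iph;

	/* Ensure at least the minimal IP header is linear. */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;

	iph = (struct iphdr *)skb->data;
	/* Pull in options too; reload iph after this in real code. */
	if (!pskb_may_pull(skb, iph->ihl * 4))
		return -EINVAL;
	return 0;
}
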
924 * @skb: buffer to check
928 static inline int skb_headroom(const struct sk_buff *skb)
930 return skb->data - skb->head;
935 * @skb: buffer to check
939 static inline int skb_tailroom(const struct sk_buff *skb)
941 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
946 * @skb: buffer to alter
952 static inline void skb_reserve(struct sk_buff *skb, int len)
954 skb->data += len;
955 skb->tail += len;
983 * The networking layer reserves some headroom in skb data (via
984 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1001 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1003 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1005 if (unlikely(skb->data_len)) {
1009 skb->len = len;
1010 skb->tail = skb->data + len;
1015 * @skb: buffer to alter
1020 * The skb must be linear.
1022 static inline void skb_trim(struct sk_buff *skb, unsigned int len)
1024 if (skb->len > len)
1025 __skb_trim(skb, len);
1029 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1031 if (skb->data_len)
1032 return ___pskb_trim(skb, len);
1033 __skb_trim(skb, len);
1037 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1039 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1044 * @skb: buffer to alter
1048 * the skb is not cloned so we should never get an error due to out-of-memory.
1051 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1053 int err = pskb_trim(skb, len);
1059 * @skb: buffer to orphan
1062 * destructor function and make the @skb unowned. The buffer continues
1065 static inline void skb_orphan(struct sk_buff *skb)
1067 if (skb->destructor)
1068 skb->destructor(skb);
1069 skb->destructor = NULL;
1070 skb->sk = NULL;
1084 struct sk_buff *skb;
1085 while ((skb = __skb_dequeue(list)) != NULL)
1086 kfree_skb(skb);
1104 struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
1105 if (likely(skb))
1106 skb_reserve(skb, NET_SKB_PAD);
1107 return skb;
1150 * skb_cow - copy header of skb when it is required
1151 * @skb: buffer to cow
1154 * If the skb passed lacks sufficient headroom or its data part
1156 * is returned and original skb is not changed.
1158 * The result is skb with writable area skb->head...skb->tail
1161 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1164 skb_headroom(skb);
1169 if (delta || skb_cloned(skb))
1170 return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
1177 * @skb: buffer to pad
1183 * success. The skb is freed on error.
1186 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1188 unsigned int size = skb->len;
1191 return skb_pad(skb, len-size);
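
A sketch of the usual skb_padto() call site, padding runt Ethernet frames in a driver transmit routine. Note that this listing's variant returns an int and frees the skb on failure; the driver names here are invented:

#include <linux/if_ether.h>

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return 0;	/* skb was freed by skb_padto */
	}
	/* ... hand the now >= ETH_ZLEN byte frame to the hardware ... */
	return 0;
}
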
1194 static inline int skb_add_data(struct sk_buff *skb,
1197 const int off = skb->len;
1199 if (skb->ip_summed == CHECKSUM_NONE) {
1202 skb_put(skb, copy),
1205 skb->csum = csum_block_add(skb->csum, csum, off);
1208 } else if (!copy_from_user(skb_put(skb, copy), from, copy))
1211 __skb_trim(skb, off);
1215 static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1219 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1227 static inline int __skb_linearize(struct sk_buff *skb)
1229 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1233 * skb_linearize - convert paged skb to linear one
1234 * @skb: buffer to linearize
1237 * is returned and the old skb data released.
1239 static inline int skb_linearize(struct sk_buff *skb)
1241 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1245 * skb_linearize_cow - make sure skb is linear and writable
1246 * @skb: buffer to process
1249 * is returned and the old skb data released.
1251 static inline int skb_linearize_cow(struct sk_buff *skb)
1253 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1254 __skb_linearize(skb) : 0;
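
Drivers for hardware that cannot do scatter/gather typically linearize before setting up DMA. A minimal sketch using the helpers above:

static int example_tx_prep(struct sk_buff *skb)
{
	/* Collapse any page fragments into the linear data area. */
	if (skb_linearize(skb))
		return -ENOMEM;

	/* skb->data now covers all skb->len bytes. */
	return 0;
}
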
1258 * skb_postpull_rcsum - update checksum for received skb after pull
1259 * @skb: buffer to update
1268 static inline void skb_postpull_rcsum(struct sk_buff *skb,
1271 if (skb->ip_summed == CHECKSUM_HW)
1272 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1275 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1278 * pskb_trim_rcsum - trim received skb and update checksum
1279 * @skb: buffer to trim
1286 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1288 if (likely(len >= skb->len))
1290 if (skb->ip_summed == CHECKSUM_HW)
1291 skb->ip_summed = CHECKSUM_NONE;
1292 return __pskb_trim(skb, len);
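
pskb_trim_rcsum() is the helper IPv4 input uses to strip link-layer padding while keeping skb->ip_summed honest. A sketch (header validation is abbreviated; real code fully checks iph first):

static int trim_example(struct sk_buff *skb, const struct iphdr *iph)
{
	u32 len = ntohs(iph->tot_len);

	if (skb->len < len || len < iph->ihl * 4)
		return -EINVAL;
	return pskb_trim_rcsum(skb, len);	/* drops HW csum if it trims */
}
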
1313 #define skb_queue_walk(queue, skb) \
1314 for (skb = (queue)->next; \
1315 prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1316 skb = skb->next)
1318 #define skb_queue_reverse_walk(queue, skb) \
1319 for (skb = (queue)->prev; \
1320 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
1321 skb = skb->prev)
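
A sketch of walking a queue with these macros; the caller is assumed to hold the queue's lock (or otherwise exclude concurrent writers) for the duration:

static unsigned int queue_bytes(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned int total = 0;

	skb_queue_walk(list, skb)
		total += skb->len;
	return total;
}
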
1331 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
1334 extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
1335 extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
1337 extern unsigned int skb_checksum(const struct sk_buff *skb, int offset,
1339 extern int skb_copy_bits(const struct sk_buff *skb, int offset,
1341 extern int skb_store_bits(const struct sk_buff *skb, int offset,
1343 extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb,
1346 extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1347 extern void skb_split(struct sk_buff *skb,
1350 extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1352 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1355 int hlen = skb_headlen(skb);
1358 return skb->data + offset;
1360 if (skb_copy_bits(skb, offset, buffer, len) < 0)
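
skb_header_pointer() either returns a pointer straight into the linear area or copies the requested bytes into the caller's buffer, which is why callers always pass a stack copy. A sketch in the style of netfilter matches; thoff, the transport-header offset, is a hypothetical parameter:

#include <linux/udp.h>

static int ports_example(const struct sk_buff *skb, int thoff,
			 u16 *sport, u16 *dport)
{
	struct udphdr _udph, *uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_udph), &_udph);
	if (!uh)
		return -EINVAL;		/* packet too short */
	*sport = ntohs(uh->source);
	*dport = ntohs(uh->dest);
	return 0;
}
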
1370 * skb_get_timestamp - get timestamp from a skb
1371 * @skb: skb to get stamp from
1374 * Timestamps are stored in the skb as offsets to a base timestamp.
1378 static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
1380 stamp->tv_sec = skb->tstamp.off_sec;
1381 stamp->tv_usec = skb->tstamp.off_usec;
1385 * skb_set_timestamp - set timestamp of a skb
1386 * @skb: skb to set stamp of
1389 * Timestamps are stored in the skb as offsets to a base timestamp.
1391 * it in the skb.
1393 static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
1395 skb->tstamp.off_sec = stamp->tv_sec;
1396 skb->tstamp.off_usec = stamp->tv_usec;
1399 extern void __net_timestamp(struct sk_buff *skb);
1401 extern unsigned int __skb_checksum_complete(struct sk_buff *skb);
1405 * @skb: packet to process
1408 * the value of skb->csum. The latter can be used to supply the
1416 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
1419 static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
1421 return skb->ip_summed != CHECKSUM_UNNECESSARY &&
1422 __skb_checksum_complete(skb);
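
A sketch of the receive-side pattern this enables, as in UDP: trust CHECKSUM_UNNECESSARY, otherwise fold and verify skb->csum across the packet. It assumes the caller has already seeded skb->csum with the pseudo-header sum where the protocol requires one:

static int rx_csum_example(struct sk_buff *skb)
{
	if (skb_checksum_complete(skb))
		return -EINVAL;	/* nonzero fold => bad checksum */

	/* checksum valid, or the hardware already verified it */
	return 0;
}
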
1437 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
1439 if (skb)
1440 atomic_inc(&skb->users);
1442 static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
1444 if (skb)
1445 kfree_skb(skb);
1460 static inline void nf_reset(struct sk_buff *skb)
1462 nf_conntrack_put(skb->nfct);
1463 skb->nfct = NULL;
1465 nf_conntrack_put_reasm(skb->nfct_reasm);
1466 skb->nfct_reasm = NULL;
1469 nf_bridge_put(skb->nf_bridge);
1470 skb->nf_bridge = NULL;
1475 static inline void nf_reset(struct sk_buff *skb) {}
1484 static inline void skb_init_secmark(struct sk_buff *skb)
1486 skb->secmark = 0;
1492 static inline void skb_init_secmark(struct sk_buff *skb)
1496 static inline int skb_is_gso(const struct sk_buff *skb)
1498 return skb_shinfo(skb)->gso_size;