skb management functions: alloc_skb, dev_alloc_skb, kfree_skb, dev_kfree_skb, consume_skb
Posted by Alex
alloc_skb -- allocate an skb.
dev_alloc_skb -- allocate an skb; typically used by device drivers in interrupt context. It is a legacy wrapper around netdev_alloc_skb(NULL, length), and because it may be called from an interrupt handler the allocation must be atomic (GFP_ATOMIC).
kfree_skb -- drop one reference to the skb and free it when the count hits zero; used to release an skb when a packet is dropped on an error path.
dev_kfree_skb == consume_skb -- drop one reference to the skb and free it when the count hits zero; used to release an skb that was processed successfully.
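Before walking through the sources, here is a minimal, hypothetical kernel-style sketch (demo_pairing() and frame_ok are invented names) of how the allocation and free routines pair up:

#include <linux/skbuff.h>

/* Hypothetical sketch: allocate in interrupt context, then release the skb
 * on either the error path (kfree_skb == drop) or the normal path
 * (consume_skb / dev_kfree_skb == successfully processed). */
static void demo_pairing(unsigned int len, bool frame_ok)
{
    struct sk_buff *skb = dev_alloc_skb(len);   /* GFP_ATOMIC under the hood */

    if (!skb)
        return;                 /* allocation failure */

    skb_put(skb, len);          /* pretend we filled 'len' bytes of data */

    if (!frame_ok) {
        kfree_skb(skb);         /* counted/traced as a packet drop */
        return;
    }

    /* ... hand the data to whoever needs it ... */
    consume_skb(skb);           /* normal completion, not a drop */
}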
static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
{
    return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *      instead of head cache and allocate a cloned (child) skb.
 *      If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *      allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
    struct kmem_cache *cache;
    struct skb_shared_info *shinfo;
    struct sk_buff *skb;
    u8 *data;
    bool pfmemalloc;

    /* Pick the slab cache to allocate from */
    cache = (flags & SKB_ALLOC_FCLONE)
        ? skbuff_fclone_cache : skbuff_head_cache;

    if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
        gfp_mask |= __GFP_MEMALLOC;

    /* Get the HEAD */
    /* Allocate the sk_buff descriptor itself */
    skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
    if (!skb)
        goto out;
    prefetchw(skb);

    /* We do our best to align skb_shared_info on a separate cache
     * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
     * aligned memory blocks, unless SLUB/SLAB debug is enabled.
     * Both skb->head and skb_shared_info are cache line aligned.
     */
    /* Align the requested data size */
    size = SKB_DATA_ALIGN(size);
    /* Add the (aligned) size of skb_shared_info */
    size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    /* Allocate the data area */
    data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
    if (!data)
        goto nodata;
    /* kmalloc(size) might give us more room than requested.
     * Put skb_shared_info exactly at the end of allocated zone,
     * to allow max possible filling before reallocation.
     */
    /* Size of the data area excluding skb_shared_info */
    size = SKB_WITH_OVERHEAD(ksize(data));
    prefetchw(data + size);

    /*
     * Only clear those fields we need to clear, not those that we will
     * actually initialise below. Hence, don't put any more fields after
     * the tail pointer in struct sk_buff!
     */
    memset(skb, 0, offsetof(struct sk_buff, tail));
    /* Account for allocated memory : skb + skb->head */
    /* truesize = sk_buff size + data size + skb_shared_info size */
    skb->truesize = SKB_TRUESIZE(size);
    /* Remember whether this came from a PFMEMALLOC reserve */
    skb->pfmemalloc = pfmemalloc;
    /* Set the reference count to one */
    atomic_set(&skb->users, 1);
    /* head, data and tail all point to the start of the data area */
    skb->head = data;
    skb->data = data;
    skb_reset_tail_pointer(skb);
    /* end points to the end of the data area */
    skb->end = skb->tail + size;
    /* Initialise the default header offsets */
    skb->mac_header = (typeof(skb->mac_header))~0U;
    skb->transport_header = (typeof(skb->transport_header))~0U;

    /* make sure we initialize shinfo sequentially */
    /* The area starting at end holds skb_shared_info */
    shinfo = skb_shinfo(skb);
    memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
    /* Set the data reference count to one */
    atomic_set(&shinfo->dataref, 1);
    kmemcheck_annotate_variable(shinfo->destructor_arg);

    /* Fast-clone allocation was requested */
    if (flags & SKB_ALLOC_FCLONE) {
        struct sk_buff_fclones *fclones;

        /* Get the enclosing fclone structure */
        fclones = container_of(skb, struct sk_buff_fclones, skb1);

        kmemcheck_annotate_bitfield(&fclones->skb2, flags1);

        /* Mark skb1 as the original */
        skb->fclone = SKB_FCLONE_ORIG;

        /* Set the fclone reference count to one */
        atomic_set(&fclones->fclone_ref, 1);

        /* Mark skb2 as the clone slot */
        fclones->skb2.fclone = SKB_FCLONE_CLONE;
    }
out:
    return skb;
nodata:
    kmem_cache_free(cache, skb);
    skb = NULL;
    goto out;
}
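The layout that __alloc_skb() leaves behind (head == data == tail, end == tail + size, skb_shared_info starting at end) is what skb_reserve()/skb_put()/skb_push() later manipulate. A small hypothetical sketch (demo_layout() is an invented name) of that contract on a fresh buffer:

#include <linux/skbuff.h>
#include <linux/printk.h>

/* Hypothetical illustration: right after alloc_skb() all of the buffer is
 * tailroom and there is no headroom yet. */
static void demo_layout(void)
{
    struct sk_buff *skb = alloc_skb(1024, GFP_KERNEL);

    if (!skb)
        return;

    /* headroom == 0, tailroom >= 1024 (kmalloc may round the size up) */
    pr_info("headroom=%u tailroom=%d\n",
            skb_headroom(skb), skb_tailroom(skb));

    skb_reserve(skb, 128);   /* move data and tail forward: creates headroom */
    skb_put(skb, 256);       /* move tail forward: claims 256 bytes of data  */
    skb_push(skb, 64);       /* move data backward: prepend a 64-byte header */

    kfree_skb(skb);
}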
/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
    return netdev_alloc_skb(NULL, length);
}
/**
 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory. Although this function
 * allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
                                               unsigned int length)
{
    return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
                                   gfp_t gfp_mask)
{
    struct page_frag_cache *nc;
    unsigned long flags;
    struct sk_buff *skb;
    bool pfmemalloc;
    void *data;

    len += NET_SKB_PAD;

    /*
     * Fall back to __alloc_skb() if the requested length plus
     * skb_shared_info does not fit in a single page, or if the caller
     * passed __GFP_DIRECT_RECLAIM or GFP_DMA.
     */
    if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
        (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
        /* Allocate via __alloc_skb() */
        skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
        if (!skb)
            goto skb_fail;

        /* Allocation succeeded */
        goto skb_success;
    }

    /* Requested length plus skb_shared_info */
    len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    /* Align the whole length */
    len = SKB_DATA_ALIGN(len);

    if (sk_memalloc_socks())
        gfp_mask |= __GFP_MEMALLOC;

    /* Disable local interrupts */
    local_irq_save(flags);

    nc = this_cpu_ptr(&netdev_alloc_cache);
    /* Allocate the data area from the per-CPU page-fragment cache */
    data = page_frag_alloc(nc, len, gfp_mask);
    pfmemalloc = nc->pfmemalloc;

    /* Restore local interrupts */
    local_irq_restore(flags);

    if (unlikely(!data))
        return NULL;

    /* Build the skb around the data area */
    skb = __build_skb(data, len);
    if (unlikely(!skb)) {
        skb_free_frag(data);
        return NULL;
    }

    /* use OR instead of assignment to avoid clearing of bits in mask */
    /* Mark as a PFMEMALLOC allocation */
    if (pfmemalloc)
        skb->pfmemalloc = 1;

    /* Mark the head as a page-fragment allocation */
    skb->head_frag = 1;

skb_success:
    /* Reserve the built-in headroom */
    skb_reserve(skb, NET_SKB_PAD);
    /* Record the receiving device */
    skb->dev = dev;

skb_fail:
    return skb;
}
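A typical copy-based receive path built on netdev_alloc_skb() might look like the hypothetical sketch below (demo_rx() and hw_buf are invented names). Note that the NET_SKB_PAD headroom is already reserved inside __netdev_alloc_skb() above, so the driver only adds NET_IP_ALIGN for IP-header alignment:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Hypothetical RX handler: allocate with GFP_ATOMIC via netdev_alloc_skb(),
 * copy the frame out of a hardware buffer, then pass it up the stack. */
static void demo_rx(struct net_device *dev, const void *hw_buf, unsigned int len)
{
    struct sk_buff *skb;

    skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
    if (!skb) {
        dev->stats.rx_dropped++;
        return;
    }

    skb_reserve(skb, NET_IP_ALIGN);          /* align the IP header */
    memcpy(skb_put(skb, len), hw_buf, len);  /* copy the frame, advance tail */

    skb->protocol = eth_type_trans(skb, dev);
    netif_rx(skb);                           /* the stack now owns the skb */
}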
--- the free functions ---
/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
/* Free an skb (drop path) */
void kfree_skb(struct sk_buff *skb)
{
    if (unlikely(!skb))
        return;
    /* Usage count is 1: we are the last user and can free directly */
    if (likely(atomic_read(&skb->users) == 1))
        smp_rmb();
    /*
     * Otherwise drop one reference; if the result is non-zero there
     * are still other users, so just return.
     */
    else if (likely(!atomic_dec_and_test(&skb->users)))
        return;
    trace_kfree_skb(skb, __builtin_return_address(0));

    /* The actual release */
    __kfree_skb(skb);
}
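The fast path above (users == 1) and the atomic_dec_and_test() path only make sense together with skb_get(), which bumps skb->users. A small hypothetical sketch (demo_refcount() is an invented name) of that contract:

#include <linux/skbuff.h>

/* Hypothetical illustration of the reference count checked by kfree_skb():
 * each holder drops its own reference, and only the final drop reaches
 * __kfree_skb(). */
static void demo_refcount(void)
{
    struct sk_buff *skb = alloc_skb(256, GFP_KERNEL);
    struct sk_buff *ref;

    if (!skb)
        return;

    ref = skb_get(skb);   /* users: 1 -> 2 */

    kfree_skb(ref);       /* users: 2 -> 1, buffer stays alive */
    kfree_skb(skb);       /* users: 1 -> 0, __kfree_skb() actually frees it */
}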
/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb
 */
/* Free an skb unconditionally */
void __kfree_skb(struct sk_buff *skb)
{
    /* Release everything attached to the skb (data, dst, frags, ...) */
    skb_release_all(skb);
    /* Free the sk_buff descriptor itself */
    kfree_skbmem(skb);
}
#define dev_kfree_skb(a)    consume_skb(a)
/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that
 */
/*
 * Free an skb. The difference from kfree_skb() is that kfree_skb() is meant
 * for dropping a packet on a failure path, while consume_skb() is for the
 * normal, successful path.
 */
void consume_skb(struct sk_buff *skb)
{
    if (unlikely(!skb))
        return;
    if (likely(atomic_read(&skb->users) == 1))
        smp_rmb();
    else if (likely(!atomic_dec_and_test(&skb->users)))
        return;
    trace_consume_skb(skb);
    __kfree_skb(skb);
}
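In driver code the distinction usually shows up on the transmit-completion path, roughly as in this hypothetical sketch (demo_tx_done() is an invented name). Drivers that may run in hard-IRQ context would use dev_kfree_skb_irq()/dev_kfree_skb_any() rather than dev_kfree_skb():

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical TX-completion handler: a frame that was actually sent is
 * "consumed", not dropped, so the kfree_skb drop tracepoint stays meaningful. */
static void demo_tx_done(struct sk_buff *skb, bool sent_ok)
{
    if (sent_ok)
        dev_kfree_skb(skb);   /* == consume_skb(): normal completion */
    else
        kfree_skb(skb);       /* error path: traced as a drop */
}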