3 files changed (+17, -24 lines)
@@ -3537,10 +3537,10 @@ struct softnet_data {
 	struct numa_drop_counters	drop_counters;
 
 	/* Another possibly contended cache line */
-	spinlock_t		defer_lock ____cacheline_aligned_in_smp;
-	atomic_t		defer_count;
-	int			defer_ipi_scheduled;
-	struct sk_buff		*defer_list;
+	struct llist_head	defer_list ____cacheline_aligned_in_smp;
+	atomic_long_t		defer_count;
+
+	int			defer_ipi_scheduled ____cacheline_aligned_in_smp;
 	call_single_data_t	defer_csd;
 };
 
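The struct change replaces the spinlock-protected sk_buff chain with a lock-free llist_head, widens the counter to atomic_long_t, and moves defer_ipi_scheduled onto its own cache line, presumably so that remote CPUs pushing skbs do not false-share with the owning CPU's IPI/flush state. A rough illustration of that cache-line split in standard C follows; the 64-byte line size and every name in it are assumptions for the sketch, not taken from the patch (the kernel expresses this with ____cacheline_aligned_in_smp and SMP_CACHE_BYTES).

/* Sketch only: grouping producer-written and consumer-owned fields on
 * separate cache lines to avoid false sharing. 64 is an assumed line size.
 */
#include <stdalign.h>
#include <stdatomic.h>

struct fake_skb;			/* stand-in for struct sk_buff */

struct defer_state {
	/* Fields written by remote (producer) CPUs. */
	alignas(64) _Atomic(struct fake_skb *) defer_list;
	atomic_long defer_count;

	/* Touched mainly by the owning CPU when scheduling the flush IPI. */
	alignas(64) int defer_ipi_scheduled;
};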
@@ -6717,22 +6717,16 @@ EXPORT_SYMBOL(napi_complete_done);
 
 static void skb_defer_free_flush(struct softnet_data *sd)
 {
+	struct llist_node *free_list;
 	struct sk_buff *skb, *next;
 
-	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
-	if (!READ_ONCE(sd->defer_list))
+	if (llist_empty(&sd->defer_list))
 		return;
+	atomic_long_set(&sd->defer_count, 0);
+	free_list = llist_del_all(&sd->defer_list);
 
-	spin_lock(&sd->defer_lock);
-	skb = sd->defer_list;
-	sd->defer_list = NULL;
-	atomic_set(&sd->defer_count, 0);
-	spin_unlock(&sd->defer_lock);
-
-	while (skb != NULL) {
-		next = skb->next;
+	llist_for_each_entry_safe(skb, next, free_list, ll_node) {
 		napi_consume_skb(skb, 1);
-		skb = next;
 	}
 }
 
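The rewritten flush leans on the llist contract: the owning CPU detaches the entire chain with one atomic exchange (llist_del_all()) and then walks the now-private nodes, so no spinlock and no paired READ_ONCE()/WRITE_ONCE() are needed. Below is a rough user-space analogue of that push/detach-all pattern, using C11 atomics rather than <linux/llist.h>; every name in it is illustrative, not part of the patch.

/* Sketch only: a lock-free multi-producer / single-consumer stack in the
 * spirit of llist_add() + llist_del_all(), using C11 atomics.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;					/* stand-in for a deferred skb */
};

static _Atomic(struct node *) defer_list;	/* analogue of sd->defer_list */
static atomic_long defer_count;			/* analogue of sd->defer_count */

/* Producer side: lock-free push, roughly what llist_add() does. */
static void push(struct node *n)
{
	struct node *old = atomic_load_explicit(&defer_list, memory_order_relaxed);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&defer_list, &old, n,
							memory_order_release,
							memory_order_relaxed));
}

/* Consumer side: reset the counter, detach the whole list in one atomic
 * exchange, then walk the detached nodes privately, as the new
 * skb_defer_free_flush() does with llist_del_all() and
 * llist_for_each_entry_safe().
 */
static void flush(void)
{
	struct node *n, *next;

	atomic_store_explicit(&defer_count, 0, memory_order_relaxed);
	n = atomic_exchange_explicit(&defer_list, NULL, memory_order_acquire);

	for (; n; n = next) {
		next = n->next;
		printf("freeing deferred item %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		atomic_fetch_add_explicit(&defer_count, 1, memory_order_relaxed);
		push(n);
	}
	flush();	/* items come back newest-first, matching llist order */
	return 0;
}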
@@ -12995,7 +12989,7 @@ static int __init net_dev_init(void)
 		sd->cpu = i;
 #endif
 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
-		spin_lock_init(&sd->defer_lock);
+		init_llist_head(&sd->defer_list);
 
 		gro_init(&sd->backlog.gro);
 		sd->backlog.poll = process_backlog;
@@ -7185,6 +7185,7 @@ static void kfree_skb_napi_cache(struct sk_buff *skb)
  */
 void skb_attempt_defer_free(struct sk_buff *skb)
 {
+	unsigned long defer_count;
 	int cpu = skb->alloc_cpu;
 	struct softnet_data *sd;
 	unsigned int defer_max;
@@ -7202,17 +7203,15 @@ nodefer:	kfree_skb_napi_cache(skb);
 
 	sd = &per_cpu(softnet_data, cpu);
 	defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
-	if (atomic_read(&sd->defer_count) >= defer_max)
+	defer_count = atomic_long_inc_return(&sd->defer_count);
+
+	if (defer_count >= defer_max)
 		goto nodefer;
 
-	spin_lock_bh(&sd->defer_lock);
-	/* Send an IPI every time queue reaches half capacity. */
-	kick = (atomic_inc_return(&sd->defer_count) - 1) == (defer_max >> 1);
+	llist_add(&skb->ll_node, &sd->defer_list);
 
-	skb->next = sd->defer_list;
-	/* Paired with READ_ONCE() in skb_defer_free_flush() */
-	WRITE_ONCE(sd->defer_list, skb);
-	spin_unlock_bh(&sd->defer_lock);
+	/* Send an IPI every time queue reaches half capacity. */
+	kick = (defer_count - 1) == (defer_max >> 1);
 
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).
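On the producer side the lock is gone as well: the counter is bumped first with atomic_long_inc_return(), the skb falls back to the immediate-free path once defer_max is reached, and the returned value drives the half-capacity IPI kick. A tiny worked example of that condition is sketched below; defer_max = 64 is only an assumed sample value (the real limit is read from net_hotdata.sysctl_skb_defer_max).

/* Sketch only: the "kick once the queue crosses half capacity" test. */
#include <stdbool.h>
#include <stdio.h>

static bool should_kick(unsigned long defer_count, unsigned int defer_max)
{
	/* defer_count is the value just returned by the atomic increment. */
	return (defer_count - 1) == (defer_max >> 1);
}

int main(void)
{
	unsigned int defer_max = 64;	/* assumed sample value */

	for (unsigned long n = 1; n < defer_max; n++)
		if (should_kick(n, defer_max))
			printf("kick when the increment returns %lu\n", n);

	return 0;	/* prints once: the kick fires when the counter hits 33 */
}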