xenbits.xensource.com Git - people/aperard/linux.git/commitdiff
net: call skb_defer_free_flush() from __napi_busy_loop()
author: Eric Dumazet <edumazet@google.com>
Tue, 27 Feb 2024 21:01:04 +0000 (21:01 +0000)
committer: Jakub Kicinski <kuba@kernel.org>
Thu, 29 Feb 2024 04:22:03 +0000 (20:22 -0800)
skb_defer_free_flush() is currently called from net_rx_action()
and napi_threaded_poll().

We should also call it from __napi_busy_loop(); otherwise
there is a risk that the percpu queue can grow until an IPI
is forced from skb_attempt_defer_free(), adding a latency spike.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Samiullah Khawaja <skhawaja@google.com>
Acked-by: Stanislav Fomichev <sdf@google.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Link: https://lore.kernel.org/r/20240227210105.3815474-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/dev.c

index 275fd5259a4a92d0bd2e145d66a716248b6c2804..053fac78305c7322b894ceb07a925f7e64ed70aa 100644 (file)
@@ -6173,6 +6173,27 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
        return NULL;
 }
 
+static void skb_defer_free_flush(struct softnet_data *sd)
+{
+       struct sk_buff *skb, *next;
+
+       /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+       if (!READ_ONCE(sd->defer_list))
+               return;
+
+       spin_lock(&sd->defer_lock);
+       skb = sd->defer_list;
+       sd->defer_list = NULL;
+       sd->defer_count = 0;
+       spin_unlock(&sd->defer_lock);
+
+       while (skb != NULL) {
+               next = skb->next;
+               napi_consume_skb(skb, 1);
+               skb = next;
+       }
+}
+
 #if defined(CONFIG_NET_RX_BUSY_POLL)
 
 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
@@ -6297,6 +6318,7 @@ count:
                if (work > 0)
                        __NET_ADD_STATS(dev_net(napi->dev),
                                        LINUX_MIB_BUSYPOLLRXPACKETS, work);
+               skb_defer_free_flush(this_cpu_ptr(&softnet_data));
                local_bh_enable();
 
                if (!loop_end || loop_end(loop_end_arg, start_time))
@@ -6726,27 +6748,6 @@ static int napi_thread_wait(struct napi_struct *napi)
        return -1;
 }
 
-static void skb_defer_free_flush(struct softnet_data *sd)
-{
-       struct sk_buff *skb, *next;
-
-       /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
-       if (!READ_ONCE(sd->defer_list))
-               return;
-
-       spin_lock(&sd->defer_lock);
-       skb = sd->defer_list;
-       sd->defer_list = NULL;
-       sd->defer_count = 0;
-       spin_unlock(&sd->defer_lock);
-
-       while (skb != NULL) {
-               next = skb->next;
-               napi_consume_skb(skb, 1);
-               skb = next;
-       }
-}
-
 static int napi_threaded_poll(void *data)
 {
        struct napi_struct *napi = data;