From bdbe94a8a785cbd2703993098103e5df4f6b0ab8 Mon Sep 17 00:00:00 2001 From: Praveen Kaligineedi Date: Thu, 16 Mar 2023 14:26:07 -0700 Subject: [PATCH 01/28] gve: Cache link_speed value from device The link speed never changes for the uptime of a VM. Caching the value avoids an admin queue query on every ethtool request and improves performance. Fixes: 7e074d5a76ca ("gve: Enable Link Speed Reporting in the driver.") Signed-off-by: Joshua Washington --- google/gve/gve_ethtool.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index ce574d0..5f81470 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -537,7 +537,10 @@ static int gve_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct gve_priv *priv = netdev_priv(netdev); - int err = gve_adminq_report_link_speed(priv); + int err = 0; + + if (priv->link_speed == 0) + err = gve_adminq_report_link_speed(priv); cmd->base.speed = priv->link_speed; return err;
From 2beeab6636f39f69b3e0fffca1de56e339bf6a72 Mon Sep 17 00:00:00 2001 From: Praveen Kaligineedi Date: Thu, 16 Mar 2023 15:27:48 -0700 Subject: [PATCH 02/28] Update version to 1.3.3rc1 --- google/gve/gve_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index d29323e..92505a0 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -23,7 +23,7 @@ #define GVE_DEFAULT_RX_COPYBREAK (256) #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK) -#define GVE_VERSION "1.3.2" +#define GVE_VERSION "1.3.3rc1" #define GVE_VERSION_PREFIX "GVE-" // Minimum amount of time between queue kicks in msec (10 seconds)
From 5cfb516f966567d627f66a4028a3c2c9c7d6d6fc Mon Sep 17 00:00:00 2001 From: Haiyue Wang Date: Tue, 15 Feb 2022 13:17:49 +0800 Subject: [PATCH 03/28] gve: enhance no queue page list detection Commit a5886ef4f4bf ("gve: Introduce per netdev `enum gve_queue_format`") introduced three queue format types; only GVE_GQI_QPL_FORMAT queues have a page list. The driver should therefore use the queue page list count to detect an empty queue page list; correct that logic. Checking 'queue_format == GVE_GQI_RDA_FORMAT' can lead to a zero-sized memory allocation request, for example when the queue format is GVE_DQO_RDA_FORMAT. The kernel memory subsystem then returns ZERO_SIZE_PTR, which is not a NULL address, so the driver still runs successfully. The code also checks the queue page list count first and only then accesses the allocated memory, so an allocation for zero queue page lists does not lead to an access fault.
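To make the ZERO_SIZE_PTR point concrete, here is a minimal sketch (illustrative only, not part of the patch; it reuses the driver's gve_num_tx_qpls()/gve_num_rx_qpls() helpers, and example_alloc_qpls() is a hypothetical wrapper) of why the old check never faulted:

static int example_alloc_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);

	/* For num_qpls == 0 the allocator does not fail: kvcalloc(0, ...)
	 * returns ZERO_SIZE_PTR, a non-NULL cookie that must never be
	 * dereferenced, so the error path below is not taken.
	 */
	priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
	if (!priv->qpls)
		return -ENOMEM;

	/* Later loops iterate num_qpls times, so ZERO_SIZE_PTR is never
	 * dereferenced; the old format check merely permitted a useless
	 * zero-sized allocation. Returning early when num_qpls == 0, as
	 * this patch does, avoids that for every RDA format.
	 */
	return 0;
}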
Signed-off-by: Haiyue Wang Reviewed-by: Bailey Forrest Link: https://lore.kernel.org/r/20220215051751.260866-1-haiyue.wang@intel.com Signed-off-by: Jakub Kicinski --- google/gve/gve_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 92505a0..c325573 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -902,8 +902,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) int i, j; int err; - /* Raw addressing means no QPLs */ - if (priv->queue_format == GVE_GQI_RDA_FORMAT) + if (num_qpls == 0) return 0; priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); @@ -946,8 +945,7 @@ static void gve_free_qpls(struct gve_priv *priv) int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); int i; - /* Raw addressing means no QPLs */ - if (priv->queue_format == GVE_GQI_RDA_FORMAT) + if (num_qpls == 0) return; kvfree(priv->qpl_cfg.qpl_id_map);
From c559dab81a07d021794c6a0521b42747632240b8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 27 Sep 2022 06:27:53 -0700 Subject: [PATCH 04/28] net: drop the weight argument from netif_napi_add We tell driver developers to always pass NAPI_POLL_WEIGHT as the weight to netif_napi_add(). This may be confusing to newcomers, so drop the weight argument; those who really need to tweak the weight can use netif_napi_add_weight(). Acked-by: Marc Kleine-Budde # for CAN Link: https://lore.kernel.org/r/20220927132753.750069-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- google/gve/gve_main.c | 3 +-- patches/netif_napi_add.cocci | 9 ++++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index c325573..96d6f7f 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -571,8 +571,7 @@ static void gve_add_napi(struct gve_priv *priv, int ntfy_idx, { struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; - netif_napi_add(priv->dev, &block->napi, gve_poll, - NAPI_POLL_WEIGHT); + netif_napi_add(priv->dev, &block->napi, gve_poll); } static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) diff --git a/patches/netif_napi_add.cocci b/patches/netif_napi_add.cocci index 78f33f9..e69551f 100644 --- a/patches/netif_napi_add.cocci +++ b/patches/netif_napi_add.cocci @@ -2,7 +2,14 @@ expression dev, napi, func, weight; @@ -netif_napi_add(dev, napi, func, weight); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,0,0) +netif_napi_add(dev, napi, func); ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(6,0,0) */ ++netif_napi_add(dev, napi, func, NAPI_POLL_WEIGHT); ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6,0,0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) +napi_hash_add(napi); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) */ + +
From cfd1a9609b463b43873994f054e170b8ff7088d0 Mon Sep 17 00:00:00 2001 From: Praveen Kaligineedi Date: Fri, 3 Feb 2023 13:20:45 -0800 Subject: [PATCH 05/28] gve: Fix gve interrupt names IRQs are currently requested before the netdevice is registered and a proper name is assigned to the device. Change the interrupt name to avoid using the format string in the name. Interrupt name before change: eth%d-ntfy-block.<blk_id> Interrupt name after change: gve-ntfy-blk<blk_id>@pci:<pci_name> Signed-off-by: Praveen Kaligineedi Reviewed-by: Jeroen de Borst Acked-by: Jakub Kicinski Signed-off-by: David S.
Miller --- google/gve/gve_main.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 96d6f7f..e356825 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -327,7 +327,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) static int gve_alloc_notify_blocks(struct gve_priv *priv) { int num_vecs_requested = priv->num_ntfy_blks + 1; - char *name = priv->dev->name; unsigned int active_cpus; int vecs_enabled; int i, j; @@ -371,8 +370,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); /* Setup Management Vector - the last vector */ - snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt", - name); + snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s", + pci_name(priv->pdev)); err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); if (err) { @@ -401,8 +400,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) struct gve_notify_block *block = &priv->ntfy_blocks[i]; int msix_idx = i; - snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d", - name, i); + snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s", + i, pci_name(priv->pdev)); block->priv = priv; err = request_irq(priv->msix_vectors[msix_idx].vector, gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, From 688237468703ace4a4b3c2ff2aec53820434504a Mon Sep 17 00:00:00 2001 From: Praveen Kaligineedi Date: Thu, 23 Mar 2023 13:25:13 -0700 Subject: [PATCH 06/28] Bump version to 1.3.3 --- google/gve/gve_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index e356825..8437133 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -23,7 +23,7 @@ #define GVE_DEFAULT_RX_COPYBREAK (256) #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK) -#define GVE_VERSION "1.3.3rc1" +#define GVE_VERSION "1.3.3" #define GVE_VERSION_PREFIX "GVE-" // Minimum amount of time between queue kicks in msec (10 seconds) From 9d73cf4ebe65bb601f0d3bea6100860dd33e27f6 Mon Sep 17 00:00:00 2001 From: Praveen Kaligineedi Date: Wed, 15 Mar 2023 16:33:08 -0700 Subject: [PATCH 07/28] gve: Add XDP support for GQI-QPL format Adding support for XDP DROP, PASS, TX, REDIRECT for GQI QPL format. Add AF_XDP zero-copy support. When an XDP program is installed, dedicated TX queues are created to handle XDP traffic. The user needs to ensure that the number of configured TX queues is equal to the number of configured RX queues; and the number of TX/RX queues is less than or equal to half the maximum number of TX/RX queues. The XDP traffic from AF_XDP sockets and from other NICs (arriving via XDP_REDIRECT) will also egress through the dedicated XDP TX queues. Although these changes support AF_XDP socket in zero-copy mode, there is still a copy happening within the driver between XSK buffer pool and QPL bounce buffers in GQI-QPL format. The following example demonstrates how the XDP packets are mapped to TX queues: Example configuration: Max RX queues : 2N, Max TX queues : 2N Configured RX queues : N, Configured TX queues : N TX queue mapping: TX queues with queue id 0,...,N-1 will handle traffic from the stack. TX queues with queue id N,...,2N-1 will handle XDP traffic. 
For the XDP packets transmitted using XDP_TX action: = N + For the XDP packets that arrive from other NICs via XDP_REDIRECT action: = N + ( smp_processor_id % N ) For AF_XDP zero-copy mode: = N + --- Updated version to 1.4.0rc1 --- google/gve/gve.h | 112 +++++- google/gve/gve_adminq.c | 8 +- google/gve/gve_adminq.h | 4 +- google/gve/gve_ethtool.c | 91 +++-- google/gve/gve_main.c | 721 ++++++++++++++++++++++++++++++++++++--- google/gve/gve_rx.c | 147 +++++++- google/gve/gve_rx_dqo.c | 2 +- google/gve/gve_tx.c | 298 ++++++++++++++-- google/gve/gve_utils.c | 6 +- google/gve/gve_utils.h | 3 +- patches/patch_xdp1.cocci | 307 +++++++++++++++++ patches/patch_xdp2.cocci | 107 ++++++ 12 files changed, 1667 insertions(+), 139 deletions(-) create mode 100644 patches/patch_xdp1.cocci create mode 100644 patches/patch_xdp2.cocci diff --git a/google/gve/gve.h b/google/gve/gve.h index 64eb044..e214b51 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -47,6 +47,10 @@ #define GVE_RX_BUFFER_SIZE_DQO 2048 +#define GVE_XDP_ACTIONS 5 + +#define GVE_TX_MAX_HEADER_SIZE 182 + /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ struct gve_rx_desc_queue { struct gve_rx_desc *desc_ring; /* the descriptor ring */ @@ -230,7 +234,10 @@ struct gve_rx_ring { u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */ u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */ - + u64 xdp_tx_errors; + u64 xdp_redirect_errors; + u64 xdp_alloc_fails; + u64 xdp_actions[GVE_XDP_ACTIONS]; u32 q_num; /* queue index */ u32 ntfy_id; /* notification block index */ struct gve_queue_resources *q_resources; /* head and tail pointer idx */ @@ -238,6 +245,12 @@ struct gve_rx_ring { struct u64_stats_sync statss; /* sync stats for 32bit archs */ struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */ + + /* XDP stuff */ + struct xdp_rxq_info xdp_rxq; + struct xdp_rxq_info xsk_rxq; + struct xsk_buff_pool *xsk_pool; + struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */ }; /* A TX desc ring entry */ @@ -258,7 +271,14 @@ struct gve_tx_iovec { * ring entry but only used for a pkt_desc not a seg_desc */ struct gve_tx_buffer_state { - struct sk_buff *skb; /* skb for this pkt */ + union { + struct sk_buff *skb; /* skb for this pkt */ + struct xdp_frame *xdp_frame; /* xdp_frame */ + }; + struct { + u16 size; /* size of xmitted xdp pkt */ + u8 is_xsk; /* xsk buff */ + } xdp; union { struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ struct { @@ -373,6 +393,8 @@ struct gve_tx_ring { struct { /* Spinlock for when cleanup in progress */ spinlock_t clean_lock; + /* Spinlock for XDP tx traffic */ + spinlock_t xdp_lock; }; /* DQO fields. 
*/ @@ -450,6 +472,12 @@ struct gve_tx_ring { dma_addr_t q_resources_bus; /* dma address of the queue resources */ dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ struct u64_stats_sync statss; /* sync stats for 32bit archs */ + struct xsk_buff_pool *xsk_pool; + u32 xdp_xsk_wakeup; + u32 xdp_xsk_done; + u64 xdp_xsk_sent; + u64 xdp_xmit; + u64 xdp_xmit_errors; } ____cacheline_aligned; /* Wraps the info for one irq including the napi struct and the queues @@ -526,9 +554,11 @@ struct gve_priv { u16 rx_data_slot_cnt; /* rx buffer length */ u64 max_registered_pages; u64 num_registered_pages; /* num pages registered with NIC */ + struct bpf_prog *xdp_prog; /* XDP BPF program */ u32 rx_copybreak; /* copy packets smaller than this */ u16 default_num_queues; /* default num queues to set up */ + u16 num_xdp_queues; struct gve_queue_config tx_cfg; struct gve_queue_config rx_cfg; struct gve_qpl_config qpl_cfg; /* map used QPL ids */ @@ -785,7 +815,17 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv) if (priv->queue_format != GVE_GQI_QPL_FORMAT) return 0; - return priv->tx_cfg.num_queues; + return priv->tx_cfg.num_queues + priv->num_xdp_queues; +} + +/* Returns the number of XDP tx queue page lists + */ +static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) +{ + if (priv->queue_format != GVE_GQI_QPL_FORMAT) + return 0; + + return priv->num_xdp_queues; } /* Returns the number of rx queue page lists @@ -798,16 +838,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv) return priv->rx_cfg.num_queues; } +static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid) +{ + return tx_qid; +} + +static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid) +{ + return priv->tx_cfg.max_queues + rx_qid; +} + +static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv) +{ + return gve_tx_qpl_id(priv, 0); +} + +static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv) +{ + return gve_rx_qpl_id(priv, 0); +} + /* Returns a pointer to the next available tx qpl in the list of qpls */ static inline -struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv) +struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid) { - int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map, - priv->qpl_cfg.qpl_map_size); + int id = gve_tx_qpl_id(priv, tx_qid); - /* we are out of tx qpls */ - if (id >= gve_num_tx_qpls(priv)) + /* QPL already in use */ + if (test_bit(id, priv->qpl_cfg.qpl_id_map)) return NULL; set_bit(id, priv->qpl_cfg.qpl_id_map); @@ -817,14 +876,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv) /* Returns a pointer to the next available rx qpl in the list of qpls */ static inline -struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv) +struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid) { - int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map, - priv->qpl_cfg.qpl_map_size, - gve_num_tx_qpls(priv)); + int id = gve_rx_qpl_id(priv, rx_qid); - /* we are out of rx qpls */ - if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv)) + /* QPL already in use */ + if (test_bit(id, priv->qpl_cfg.qpl_id_map)) return NULL; set_bit(id, priv->qpl_cfg.qpl_id_map); @@ -843,7 +900,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id) static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, int id) { - if (id < gve_num_tx_qpls(priv)) + if (id < gve_rx_start_qpl_id(priv)) return DMA_TO_DEVICE; else return DMA_FROM_DEVICE; @@ -855,6 +912,21 @@ static 
inline bool gve_is_gqi(struct gve_priv *priv) priv->queue_format == GVE_GQI_QPL_FORMAT; } +static inline u32 gve_num_tx_queues(struct gve_priv *priv) +{ + return priv->tx_cfg.num_queues + priv->num_xdp_queues; +} + +static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id) +{ + return priv->tx_cfg.num_queues + queue_id; +} + +static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv) +{ + return gve_xdp_tx_queue_id(priv, 0); +} + /* buffers */ int gve_alloc_page(struct gve_priv *priv, struct device *dev, struct page **page, dma_addr_t *dma, @@ -863,9 +935,15 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, enum dma_data_direction); /* tx handling */ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); +int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); +int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p); +void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid); bool gve_tx_poll(struct gve_notify_block *block, int budget); -int gve_tx_alloc_rings(struct gve_priv *priv); -void gve_tx_free_rings_gqi(struct gve_priv *priv); +bool gve_xdp_poll(struct gve_notify_block *block, int budget); +int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings); +void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings); u32 gve_tx_load_event_counter(struct gve_priv *priv, struct gve_tx_ring *tx); bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); diff --git a/google/gve/gve_adminq.c b/google/gve/gve_adminq.c index 11725c7..1a122ef 100644 --- a/google/gve/gve_adminq.c +++ b/google/gve/gve_adminq.c @@ -516,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) return gve_adminq_issue_cmd(priv, &cmd); } -int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues) +int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues) { int err; int i; - for (i = 0; i < num_queues; i++) { + for (i = start_id; i < start_id + num_queues; i++) { err = gve_adminq_create_tx_queue(priv, i); if (err) return err; @@ -604,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index) return 0; } -int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues) +int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues) { int err; int i; - for (i = 0; i < num_queues; i++) { + for (i = start_id; i < start_id + num_queues; i++) { err = gve_adminq_destroy_tx_queue(priv, i); if (err) return err; diff --git a/google/gve/gve_adminq.h b/google/gve/gve_adminq.h index cf29662..f894beb 100644 --- a/google/gve/gve_adminq.h +++ b/google/gve/gve_adminq.h @@ -410,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv, dma_addr_t db_array_bus_addr, u32 num_ntfy_blks); int gve_adminq_deconfigure_device_resources(struct gve_priv *priv); -int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues); -int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id); +int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues); +int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues); int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues); int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id); int gve_adminq_register_page_list(struct gve_priv *priv, diff --git 
a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index 5f81470..cfd4b8d 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -34,6 +34,11 @@ static u32 gve_get_msglevel(struct net_device *netdev) return priv->msg_enable; } +/* For the following stats column string names, make sure the order + * matches how it is filled in the code. For xdp_aborted, xdp_drop, + * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order + * as declared in enum xdp_action inside file uapi/linux/bpf.h . + */ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts", @@ -49,12 +54,16 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]", "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]", + "rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]", + "rx_xdp_tx[%u]", "rx_xdp_redirect[%u]", + "rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]", }; static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]", "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]", - "tx_dma_mapping_error[%u]", + "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]", + "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]" }; static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { @@ -81,8 +90,10 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct gve_priv *priv = netdev_priv(netdev); char *s = (char *)data; + int num_tx_queues; int i, j; + num_tx_queues = gve_num_tx_queues(priv); switch (stringset) { case ETH_SS_STATS: memcpy(s, *gve_gstrings_main_stats, @@ -97,7 +108,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } - for (i = 0; i < priv->tx_cfg.num_queues; i++) { + for (i = 0; i < num_tx_queues; i++) { for (j = 0; j < NUM_GVE_TX_CNTS; j++) { snprintf(s, ETH_GSTRING_LEN, gve_gstrings_tx_stats[j], i); @@ -124,12 +135,14 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) static int gve_get_sset_count(struct net_device *netdev, int sset) { struct gve_priv *priv = netdev_priv(netdev); + int num_tx_queues; + num_tx_queues = gve_num_tx_queues(priv); switch (sset) { case ETH_SS_STATS: return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN + (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) + - (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS); + (num_tx_queues * NUM_GVE_TX_CNTS); case ETH_SS_PRIV_FLAGS: return GVE_PRIV_FLAGS_STR_LEN; default: @@ -153,18 +166,20 @@ gve_get_ethtool_stats(struct net_device *netdev, struct gve_priv *priv; bool skip_nic_stats; unsigned int start; + int num_tx_queues; int ring; int i, j; ASSERT_RTNL(); priv = netdev_priv(netdev); + num_tx_queues = gve_num_tx_queues(priv); report_stats = priv->stats_report->stats; rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues, sizeof(int), GFP_KERNEL); if (!rx_qid_to_stats_idx) return; - tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues, + tx_qid_to_stats_idx = kmalloc_array(num_tx_queues, sizeof(int), GFP_KERNEL); if (!tx_qid_to_stats_idx) { kfree(rx_qid_to_stats_idx); @@ -195,7 +210,7 @@ gve_get_ethtool_stats(struct net_device *netdev, } } for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; - ring < priv->tx_cfg.num_queues; ring++) { 
+ ring < num_tx_queues; ring++) { if (priv->tx) { do { start = @@ -232,7 +247,7 @@ gve_get_ethtool_stats(struct net_device *netdev, i = GVE_MAIN_STATS_LEN; /* For rx cross-reporting stats, start from nic rx stats in report */ - base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues + + base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues + GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues + base_stats_idx; @@ -283,14 +298,26 @@ gve_get_ethtool_stats(struct net_device *netdev, if (skip_nic_stats) { /* skip NIC rx stats */ i += NIC_RX_STATS_REPORT_NUM; - continue; - } - for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) { - u64 value = - be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value); + } else { + stats_idx = rx_qid_to_stats_idx[ring]; + for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) { + u64 value = + be64_to_cpu(report_stats[stats_idx + j].value); - data[i++] = value; + data[i++] = value; + } } + /* XDP rx counters */ + do { + start = u64_stats_fetch_begin(&priv->rx[ring].statss); + for (j = 0; j < GVE_XDP_ACTIONS; j++) + data[i + j] = rx->xdp_actions[j]; + data[i + j++] = rx->xdp_tx_errors; + data[i + j++] = rx->xdp_redirect_errors; + data[i + j++] = rx->xdp_alloc_fails; + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + start)); + i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */ } } else { i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; @@ -298,7 +325,7 @@ gve_get_ethtool_stats(struct net_device *netdev, /* For tx cross-reporting stats, start from nic tx stats in report */ base_stats_idx = max_stats_idx; - max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues + + max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues + max_stats_idx; /* Preprocess the stats report for tx, map queue id to start index */ skip_nic_stats = false; @@ -316,7 +343,7 @@ gve_get_ethtool_stats(struct net_device *netdev, } /* walk TX rings */ if (priv->tx) { - for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { + for (ring = 0; ring < num_tx_queues; ring++) { struct gve_tx_ring *tx = &priv->tx[ring]; if (gve_is_gqi(priv)) { @@ -346,16 +373,28 @@ gve_get_ethtool_stats(struct net_device *netdev, if (skip_nic_stats) { /* skip NIC tx stats */ i += NIC_TX_STATS_REPORT_NUM; - continue; - } - for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) { - u64 value = - be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value); - data[i++] = value; + } else { + stats_idx = tx_qid_to_stats_idx[ring]; + for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) { + u64 value = + be64_to_cpu(report_stats[stats_idx + j].value); + data[i++] = value; + } } + /* XDP xsk counters */ + data[i++] = tx->xdp_xsk_wakeup; + data[i++] = tx->xdp_xsk_done; + do { + start = u64_stats_fetch_begin(&priv->tx[ring].statss); + data[i] = tx->xdp_xsk_sent; + data[i + 1] = tx->xdp_xmit; + data[i + 2] = tx->xdp_xmit_errors; + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + start)); + i += 3; /* XDP tx counters */ } } else { - i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS; + i += num_tx_queues * NUM_GVE_TX_CNTS; } kfree(rx_qid_to_stats_idx); @@ -412,6 +451,12 @@ static int gve_set_channels(struct net_device *netdev, if (!new_rx || !new_tx) return -EINVAL; + if (priv->num_xdp_queues && + (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) { + dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or 
equal to half the maximum number of RX/TX queues"); + return -EINVAL; + } + if (!netif_carrier_ok(netdev)) { priv->tx_cfg.num_queues = new_tx; priv->rx_cfg.num_queues = new_rx; @@ -502,7 +547,9 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags) { struct gve_priv *priv = netdev_priv(netdev); u64 ori_flags, new_flags; + int num_tx_queues; + num_tx_queues = gve_num_tx_queues(priv); ori_flags = READ_ONCE(priv->ethtool_flags); new_flags = ori_flags; @@ -522,7 +569,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags) /* delete report stats timer. */ if (!(flags & BIT(0)) && (ori_flags & BIT(0))) { int tx_stats_num = GVE_TX_STATS_REPORT_NUM * - priv->tx_cfg.num_queues; + num_tx_queues; int rx_stats_num = GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 8437133..19994f2 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -4,8 +4,10 @@ * Copyright (C) 2015-2021 Google, Inc. */ +#include #include #include +#include #include #include #include @@ -15,6 +17,7 @@ #include #include #include +#include #include "gve.h" #include "gve_dqo.h" #include "gve_adminq.h" @@ -23,7 +26,7 @@ #define GVE_DEFAULT_RX_COPYBREAK (256) #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK) -#define GVE_VERSION "1.3.3" +#define GVE_VERSION "1.4.0rc1" #define GVE_VERSION_PREFIX "GVE-" // Minimum amount of time between queue kicks in msec (10 seconds) @@ -90,8 +93,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) struct gve_priv *priv = netdev_priv(dev); unsigned int start; u64 packets, bytes; + int num_tx_queues; int ring; + num_tx_queues = gve_num_tx_queues(priv); if (priv->rx) { for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { do { @@ -106,7 +111,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) } } if (priv->tx) { - for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { + for (ring = 0; ring < num_tx_queues; ring++) { do { start = u64_stats_fetch_begin(&priv->tx[ring].statss); @@ -180,7 +185,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv) int tx_stats_num, rx_stats_num; tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * - priv->tx_cfg.num_queues; + gve_num_tx_queues(priv); rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * priv->rx_cfg.num_queues; priv->stats_report_len = struct_size(priv->stats_report, stats, @@ -245,8 +250,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) block = container_of(napi, struct gve_notify_block, napi); priv = block->priv; - if (block->tx) - reschedule |= gve_tx_poll(block, budget); + if (block->tx) { + if (block->tx->q_num < priv->tx_cfg.num_queues) + reschedule |= gve_tx_poll(block, budget); + else + reschedule |= gve_xdp_poll(block, budget); + } + if (block->rx) { work_done = gve_rx_poll(block, budget); reschedule |= work_done == budget; @@ -580,13 +590,36 @@ static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) netif_napi_del(&block->napi); } +static int gve_register_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int err; + int i; + + start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { + err = gve_adminq_register_page_list(priv, &priv->qpls[i]); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to register queue page list %d\n", + priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean + * up + */ + 
return err; + } + } + return 0; +} + static int gve_register_qpls(struct gve_priv *priv) { - int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); + int start_id; int err; int i; - for (i = 0; i < num_qpls; i++) { + start_id = gve_tx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { err = gve_adminq_register_page_list(priv, &priv->qpls[i]); if (err) { netif_err(priv, drv, priv->dev, @@ -598,16 +631,63 @@ static int gve_register_qpls(struct gve_priv *priv) return err; } } + + start_id = gve_rx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { + err = gve_adminq_register_page_list(priv, &priv->qpls[i]); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to register queue page list %d\n", + priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + } + return 0; +} + +static int gve_unregister_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int err; + int i; + + start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { + err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean up */ + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to unregister queue page list %d\n", + priv->qpls[i].id); + return err; + } + } return 0; } static int gve_unregister_qpls(struct gve_priv *priv) { - int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); + int start_id; int err; int i; - for (i = 0; i < num_qpls; i++) { + start_id = gve_tx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { + err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean up */ + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to unregister queue page list %d\n", + priv->qpls[i].id); + return err; + } + } + + start_id = gve_rx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); /* This failure will trigger a reset - no need to clean up */ if (err) { @@ -620,22 +700,44 @@ static int gve_unregister_qpls(struct gve_priv *priv) return 0; } +static int gve_create_xdp_rings(struct gve_priv *priv) +{ + int err; + + err = gve_adminq_create_tx_queues(priv, + gve_xdp_tx_start_queue_id(priv), + priv->num_xdp_queues); + if (err) { + netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n", + priv->num_xdp_queues); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n", + priv->num_xdp_queues); + + return 0; +} + static int gve_create_rings(struct gve_priv *priv) { + int num_tx_queues = gve_num_tx_queues(priv); int err; int i; - err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues); + err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues); if (err) { netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", - priv->tx_cfg.num_queues); + num_tx_queues); /* This failure will trigger a reset - no need to clean * up */ return err; } netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", - priv->tx_cfg.num_queues); + num_tx_queues); err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); if (err) { @@ -668,6 +770,23 @@ static int gve_create_rings(struct gve_priv *priv) return 0; } +static void 
add_napi_init_xdp_sync_stats(struct gve_priv *priv, + int (*napi_poll)(struct napi_struct *napi, + int budget)) +{ + int start_id = gve_xdp_tx_start_queue_id(priv); + int i; + + /* Add xdp tx napi & init sync stats*/ + for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { + int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); + + u64_stats_init(&priv->tx[i].statss); + priv->tx[i].ntfy_id = ntfy_idx; + gve_add_napi(priv, ntfy_idx, napi_poll); + } +} + static void add_napi_init_sync_stats(struct gve_priv *priv, int (*napi_poll)(struct napi_struct *napi, int budget)) @@ -675,7 +794,7 @@ static void add_napi_init_sync_stats(struct gve_priv *priv, int i; /* Add tx napi & init sync stats*/ - for (i = 0; i < priv->tx_cfg.num_queues; i++) { + for (i = 0; i < gve_num_tx_queues(priv); i++) { int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); u64_stats_init(&priv->tx[i].statss); @@ -692,34 +811,51 @@ static void add_napi_init_sync_stats(struct gve_priv *priv, } } -static void gve_tx_free_rings(struct gve_priv *priv) +static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings) { if (gve_is_gqi(priv)) { - gve_tx_free_rings_gqi(priv); + gve_tx_free_rings_gqi(priv, start_id, num_rings); } else { gve_tx_free_rings_dqo(priv); } } +static int gve_alloc_xdp_rings(struct gve_priv *priv) +{ + int start_id; + int err = 0; + + if (!priv->num_xdp_queues) + return 0; + + start_id = gve_xdp_tx_start_queue_id(priv); + err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues); + if (err) + return err; + add_napi_init_xdp_sync_stats(priv, gve_napi_poll); + + return 0; +} + static int gve_alloc_rings(struct gve_priv *priv) { int err; /* Setup tx rings */ - priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx), + priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx), GFP_KERNEL); if (!priv->tx) return -ENOMEM; if (gve_is_gqi(priv)) - err = gve_tx_alloc_rings(priv); + err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv)); else err = gve_tx_alloc_rings_dqo(priv); if (err) goto free_tx; /* Setup rx rings */ - priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx), + priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx), GFP_KERNEL); if (!priv->rx) { err = -ENOMEM; @@ -744,18 +880,39 @@ static int gve_alloc_rings(struct gve_priv *priv) kvfree(priv->rx); priv->rx = NULL; free_tx_queue: - gve_tx_free_rings(priv); + gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv)); free_tx: kvfree(priv->tx); priv->tx = NULL; return err; } +static int gve_destroy_xdp_rings(struct gve_priv *priv) +{ + int start_id; + int err; + + start_id = gve_xdp_tx_start_queue_id(priv); + err = gve_adminq_destroy_tx_queues(priv, + start_id, + priv->num_xdp_queues); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to destroy XDP queues\n"); + /* This failure will trigger a reset - no need to clean up */ + return err; + } + netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n"); + + return 0; +} + static int gve_destroy_rings(struct gve_priv *priv) { + int num_tx_queues = gve_num_tx_queues(priv); int err; - err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues); + err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues); if (err) { netif_err(priv, drv, priv->dev, "failed to destroy tx queues\n"); @@ -782,17 +939,33 @@ static void gve_rx_free_rings(struct gve_priv *priv) gve_rx_free_rings_dqo(priv); } +static void gve_free_xdp_rings(struct gve_priv *priv) +{ + int ntfy_idx, start_id; + int i; + + start_id = gve_xdp_tx_start_queue_id(priv); + if (priv->tx) { + for (i = 
start_id; i < start_id + priv->num_xdp_queues; i++) { + ntfy_idx = gve_tx_idx_to_ntfy(priv, i); + gve_remove_napi(priv, ntfy_idx); + } + gve_tx_free_rings(priv, start_id, priv->num_xdp_queues); + } +} + static void gve_free_rings(struct gve_priv *priv) { + int num_tx_queues = gve_num_tx_queues(priv); int ntfy_idx; int i; if (priv->tx) { - for (i = 0; i < priv->tx_cfg.num_queues; i++) { + for (i = 0; i < num_tx_queues; i++) { ntfy_idx = gve_tx_idx_to_ntfy(priv, i); gve_remove_napi(priv, ntfy_idx); } - gve_tx_free_rings(priv); + gve_tx_free_rings(priv, 0, num_tx_queues); kvfree(priv->tx); priv->tx = NULL; } @@ -889,40 +1062,68 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id) qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); kvfree(qpl->page_buses); + qpl->page_buses = NULL; free_pages: kvfree(qpl->pages); + qpl->pages = NULL; priv->num_registered_pages -= qpl->num_entries; } +static int gve_alloc_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int i, j; + int err; + + start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { + err = gve_alloc_queue_page_list(priv, i, + priv->tx_pages_per_qpl); + if (err) + goto free_qpls; + } + + return 0; + +free_qpls: + for (j = start_id; j <= i; j++) + gve_free_queue_page_list(priv, j); + return err; +} + static int gve_alloc_qpls(struct gve_priv *priv) { - int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); + int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; + int start_id; int i, j; int err; - if (num_qpls == 0) + if (priv->queue_format != GVE_GQI_QPL_FORMAT) return 0; - priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); + priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL); if (!priv->qpls) return -ENOMEM; - for (i = 0; i < gve_num_tx_qpls(priv); i++) { + start_id = gve_tx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { err = gve_alloc_queue_page_list(priv, i, priv->tx_pages_per_qpl); if (err) goto free_qpls; } - for (; i < num_qpls; i++) { + + start_id = gve_rx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { err = gve_alloc_queue_page_list(priv, i, priv->rx_data_slot_cnt); if (err) goto free_qpls; } - priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * + priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) * sizeof(unsigned long) * BITS_PER_BYTE; - priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls), + priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues), sizeof(unsigned long), GFP_KERNEL); if (!priv->qpl_cfg.qpl_id_map) { err = -ENOMEM; @@ -935,23 +1136,36 @@ static int gve_alloc_qpls(struct gve_priv *priv) for (j = 0; j <= i; j++) gve_free_queue_page_list(priv, j); kvfree(priv->qpls); + priv->qpls = NULL; return err; } +static void gve_free_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int i; + + start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) + gve_free_queue_page_list(priv, i); +} + static void gve_free_qpls(struct gve_priv *priv) { - int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); + int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; int i; - if (num_qpls == 0) + if (!priv->qpls) return; kvfree(priv->qpl_cfg.qpl_id_map); + priv->qpl_cfg.qpl_id_map = NULL; - for (i = 0; i < num_qpls; i++) + for (i = 0; i < max_queues; i++) gve_free_queue_page_list(priv, i); kvfree(priv->qpls); + 
priv->qpls = NULL; } /* Use this to schedule a reset when the device is capable of continuing @@ -969,11 +1183,109 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up); static void gve_turndown(struct gve_priv *priv); static void gve_turnup(struct gve_priv *priv); +static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) +{ + struct napi_struct *napi; + struct gve_rx_ring *rx; + int err = 0; + int i, j; + u32 tx_qid; + + if (!priv->num_xdp_queues) + return 0; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + rx = &priv->rx[i]; + napi = &priv->ntfy_blocks[rx->ntfy_id].napi; + + err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i, + napi->napi_id); + if (err) + goto err; + err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) + goto err; + rx->xsk_pool = xsk_get_pool_from_qid(dev, i); + if (rx->xsk_pool) { + err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i, + napi->napi_id); + if (err) + goto err; + err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, + MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err) + goto err; + xsk_pool_set_rxq_info(rx->xsk_pool, + &rx->xsk_rxq); + } + } + + for (i = 0; i < priv->num_xdp_queues; i++) { + tx_qid = gve_xdp_tx_queue_id(priv, i); + priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i); + } + return 0; + +err: + for (j = i; j >= 0; j--) { + rx = &priv->rx[j]; + if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) + xdp_rxq_info_unreg(&rx->xdp_rxq); + if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) + xdp_rxq_info_unreg(&rx->xsk_rxq); + } + return err; +} + +static void gve_unreg_xdp_info(struct gve_priv *priv) +{ + int i, tx_qid; + + if (!priv->num_xdp_queues) + return; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + struct gve_rx_ring *rx = &priv->rx[i]; + + xdp_rxq_info_unreg(&rx->xdp_rxq); + if (rx->xsk_pool) { + xdp_rxq_info_unreg(&rx->xsk_rxq); + rx->xsk_pool = NULL; + } + } + + for (i = 0; i < priv->num_xdp_queues; i++) { + tx_qid = gve_xdp_tx_queue_id(priv, i); + priv->tx[tx_qid].xsk_pool = NULL; + } +} + +static void gve_drain_page_cache(struct gve_priv *priv) +{ + struct page_frag_cache *nc; + int i; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + nc = &priv->rx[i].page_cache; + if (nc->va) { + __page_frag_cache_drain(virt_to_page(nc->va), + nc->pagecnt_bias); + nc->va = NULL; + } + } +} + static int gve_open(struct net_device *dev) { struct gve_priv *priv = netdev_priv(dev); int err; + if (priv->xdp_prog) + priv->num_xdp_queues = priv->rx_cfg.num_queues; + else + priv->num_xdp_queues = 0; + err = gve_alloc_qpls(priv); if (err) return err; @@ -989,6 +1301,10 @@ static int gve_open(struct net_device *dev) if (err) goto free_rings; + err = gve_reg_xdp_info(priv, dev); + if (err) + goto free_rings; + err = gve_register_qpls(priv); if (err) goto reset; @@ -1043,6 +1359,7 @@ static int gve_close(struct net_device *dev) netif_carrier_off(dev); if (gve_get_device_rings_ok(priv)) { gve_turndown(priv); + gve_drain_page_cache(priv); err = gve_destroy_rings(priv); if (err) goto err; @@ -1053,6 +1370,7 @@ static int gve_close(struct net_device *dev) } del_timer_sync(&priv->stats_report_timer); + gve_unreg_xdp_info(priv); gve_free_rings(priv); gve_free_qpls(priv); priv->interface_down_cnt++; @@ -1069,6 +1387,306 @@ static int gve_close(struct net_device *dev) return gve_reset_recovery(priv, false); } +static int gve_remove_xdp_queues(struct gve_priv *priv) +{ + int err; + + err = gve_destroy_xdp_rings(priv); + if (err) + return err; + + err = gve_unregister_xdp_qpls(priv); + if (err) + return err; + + 
gve_unreg_xdp_info(priv); + gve_free_xdp_rings(priv); + gve_free_xdp_qpls(priv); + priv->num_xdp_queues = 0; + return 0; +} + +static int gve_add_xdp_queues(struct gve_priv *priv) +{ + int err; + + priv->num_xdp_queues = priv->tx_cfg.num_queues; + + err = gve_alloc_xdp_qpls(priv); + if (err) + goto err; + + err = gve_alloc_xdp_rings(priv); + if (err) + goto free_xdp_qpls; + + err = gve_reg_xdp_info(priv, priv->dev); + if (err) + goto free_xdp_rings; + + err = gve_register_xdp_qpls(priv); + if (err) + goto free_xdp_rings; + + err = gve_create_xdp_rings(priv); + if (err) + goto free_xdp_rings; + + return 0; + +free_xdp_rings: + gve_free_xdp_rings(priv); +free_xdp_qpls: + gve_free_xdp_qpls(priv); +err: + priv->num_xdp_queues = 0; + return err; +} + +static void gve_handle_link_status(struct gve_priv *priv, bool link_status) +{ + if (!gve_get_napi_enabled(priv)) + return; + + if (link_status == netif_carrier_ok(priv->dev)) + return; + + if (link_status) { + netdev_info(priv->dev, "Device link is up.\n"); + netif_carrier_on(priv->dev); + } else { + netdev_info(priv->dev, "Device link is down.\n"); + netif_carrier_off(priv->dev); + } +} + +static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bpf_prog *old_prog; + int err = 0; + u32 status; + + old_prog = READ_ONCE(priv->xdp_prog); + if (!netif_carrier_ok(priv->dev)) { + WRITE_ONCE(priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + return 0; + } + + gve_turndown(priv); + if (!old_prog && prog) { + // Allocate XDP TX queues if an XDP program is + // being installed + err = gve_add_xdp_queues(priv); + if (err) + goto out; + } else if (old_prog && !prog) { + // Remove XDP TX queues if an XDP program is + // being uninstalled + err = gve_remove_xdp_queues(priv); + if (err) + goto out; + } + WRITE_ONCE(priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + +out: + gve_turnup(priv); + status = ioread32be(&priv->reg_bar0->device_status); + gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); + return err; +} + +static int gve_xsk_pool_enable(struct net_device *dev, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct gve_priv *priv = netdev_priv(dev); + struct napi_struct *napi; + struct gve_rx_ring *rx; + int tx_qid; + int err; + + if (qid >= priv->rx_cfg.num_queues) { + dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid); + return -EINVAL; + } + if (xsk_pool_get_rx_frame_size(pool) < + priv->dev->max_mtu + sizeof(struct ethhdr)) { + dev_err(&priv->pdev->dev, "xsk pool frame_len too small"); + return -EINVAL; + } + + err = xsk_pool_dma_map(pool, &priv->pdev->dev, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + if (err) + return err; + + /* If XDP prog is not installed, return */ + if (!priv->xdp_prog) + return 0; + + rx = &priv->rx[qid]; + napi = &priv->ntfy_blocks[rx->ntfy_id].napi; + err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id); + if (err) + goto err; + + err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, + MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err) + goto err; + + xsk_pool_set_rxq_info(pool, &rx->xsk_rxq); + rx->xsk_pool = pool; + + tx_qid = gve_xdp_tx_queue_id(priv, qid); + priv->tx[tx_qid].xsk_pool = pool; + + return 0; +err: + if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) + xdp_rxq_info_unreg(&rx->xsk_rxq); + + xsk_pool_dma_unmap(pool, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + return err; +} + +static int gve_xsk_pool_disable(struct net_device *dev, + u16 qid) +{ + struct gve_priv *priv = netdev_priv(dev); + 
struct napi_struct *napi_rx; + struct napi_struct *napi_tx; + struct xsk_buff_pool *pool; + int tx_qid; + + pool = xsk_get_pool_from_qid(dev, qid); + if (!pool) + return -EINVAL; + if (qid >= priv->rx_cfg.num_queues) + return -EINVAL; + + /* If XDP prog is not installed, unmap DMA and return */ + if (!priv->xdp_prog) + goto done; + + tx_qid = gve_xdp_tx_queue_id(priv, qid); + if (!netif_running(dev)) { + priv->rx[qid].xsk_pool = NULL; + xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); + priv->tx[tx_qid].xsk_pool = NULL; + goto done; + } + + napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi; + napi_disable(napi_rx); /* make sure current rx poll is done */ + + napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi; + napi_disable(napi_tx); /* make sure current tx poll is done */ + + priv->rx[qid].xsk_pool = NULL; + xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); + priv->tx[tx_qid].xsk_pool = NULL; + smp_mb(); /* Make sure it is visible to the workers on datapath */ + + napi_enable(napi_rx); + if (gve_rx_work_pending(&priv->rx[qid])) + napi_schedule(napi_rx); + + napi_enable(napi_tx); + if (gve_tx_clean_pending(priv, &priv->tx[tx_qid])) + napi_schedule(napi_tx); + +done: + xsk_pool_dma_unmap(pool, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + return 0; +} + +static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct gve_priv *priv = netdev_priv(dev); + int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id); + + if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog) + return -EINVAL; + + if (flags & XDP_WAKEUP_TX) { + struct gve_tx_ring *tx = &priv->tx[tx_queue_id]; + struct napi_struct *napi = + &priv->ntfy_blocks[tx->ntfy_id].napi; + + if (!napi_if_scheduled_mark_missed(napi)) { + /* Call local_bh_enable to trigger SoftIRQ processing */ + local_bh_disable(); + napi_schedule(napi); + local_bh_enable(); + } + + tx->xdp_xsk_wakeup++; + } + + return 0; +} + +static int verify_xdp_configuration(struct net_device *dev) +{ + struct gve_priv *priv = netdev_priv(dev); + + if (dev->features & NETIF_F_LRO) { + netdev_warn(dev, "XDP is not supported when LRO is on.\n"); + return -EOPNOTSUPP; + } + + if (priv->queue_format != GVE_GQI_QPL_FORMAT) { + netdev_warn(dev, "XDP is not supported in mode %d.\n", + priv->queue_format); + return -EOPNOTSUPP; + } + + if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) { + netdev_warn(dev, "XDP is not supported for mtu %d.\n", + dev->mtu); + return -EOPNOTSUPP; + } + + if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues || + (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { + netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d", + priv->rx_cfg.num_queues, + priv->tx_cfg.num_queues, + priv->tx_cfg.max_queues); + return -EINVAL; + } + return 0; +} + +static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct gve_priv *priv = netdev_priv(dev); + int err; + + err = verify_xdp_configuration(dev); + if (err) + return err; + switch (xdp->command) { + case XDP_SETUP_PROG: + return gve_set_xdp(priv, xdp->prog, xdp->extack); + case XDP_SETUP_XSK_POOL: + if (xdp->xsk.pool) + return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id); + else + return gve_xsk_pool_disable(dev, xdp->xsk.queue_id); + default: + return -EINVAL; + } +} + int gve_adjust_queues(struct gve_priv *priv, struct 
gve_queue_config new_rx_config, struct gve_queue_config new_tx_config) @@ -1118,7 +1736,7 @@ static void gve_turndown(struct gve_priv *priv) return; /* Disable napi to prevent more work from coming in */ - for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { + for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; @@ -1146,7 +1764,7 @@ static void gve_turnup(struct gve_priv *priv) netif_tx_start_all_queues(priv->dev); /* Enable napi and unmask interrupts for all queues */ - for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { + for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; @@ -1263,6 +1881,9 @@ static const struct net_device_ops gve_netdev_ops = { .ndo_get_stats64 = gve_get_stats, .ndo_tx_timeout = gve_tx_timeout, .ndo_set_features = gve_set_features, + .ndo_bpf = gve_xdp, + .ndo_xdp_xmit = gve_xdp_xmit, + .ndo_xsk_wakeup = gve_xsk_wakeup, }; static void gve_handle_status(struct gve_priv *priv, u32 status) @@ -1306,7 +1927,7 @@ void gve_handle_report_stats(struct gve_priv *priv) be64_add_cpu(&priv->stats_report->written_count, 1); /* tx stats */ if (priv->tx) { - for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { + for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { u32 last_completion = 0; u32 tx_frames = 0; @@ -1369,23 +1990,6 @@ void gve_handle_report_stats(struct gve_priv *priv) } } -static void gve_handle_link_status(struct gve_priv *priv, bool link_status) -{ - if (!gve_get_napi_enabled(priv)) - return; - - if (link_status == netif_carrier_ok(priv->dev)) - return; - - if (link_status) { - netdev_info(priv->dev, "Device link is up.\n"); - netif_carrier_on(priv->dev); - } else { - netdev_info(priv->dev, "Device link is down.\n"); - netif_carrier_off(priv->dev); - } -} - /* Handle NIC status register changes, reset requests and report stats */ static void gve_service_task(struct work_struct *work) { @@ -1399,6 +2003,18 @@ static void gve_service_task(struct work_struct *work) gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); } +static void gve_set_netdev_xdp_features(struct gve_priv *priv) +{ + if (priv->queue_format == GVE_GQI_QPL_FORMAT) { + priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC; + priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; + priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT; + priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; + } else { + priv->dev->xdp_features = 0; + } +} + static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) { int num_ntfy; @@ -1477,6 +2093,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) } setup_device: + gve_set_netdev_xdp_features(priv); err = gve_setup_device_resources(priv); if (!err) return 0; diff --git a/google/gve/gve_rx.c b/google/gve/gve_rx.c index 1f55137..d1da741 100644 --- a/google/gve/gve_rx.c +++ b/google/gve/gve_rx.c @@ -8,6 +8,9 @@ #include "gve_adminq.h" #include "gve_utils.h" #include +#include +#include +#include static void gve_rx_free_buffer(struct device *dev, struct gve_rx_slot_page_info *page_info, @@ -124,7 +127,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) return -ENOMEM; if (!rx->data.raw_addressing) { - rx->data.qpl = gve_assign_rx_qpl(priv); + rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num); if (!rx->data.qpl) { kvfree(rx->data.page_info); rx->data.page_info = NULL; @@ -556,7 +559,7 @@ 
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, if (len <= priv->rx_copybreak && is_only_frag) { /* Just copy small packets */ - skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD); + skb = gve_rx_copy(netdev, napi, page_info, len); if (skb) { u64_stats_update_begin(&rx->statss); rx->rx_copied_pkt++; @@ -591,6 +594,107 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, return skb; } +static int gve_xsk_pool_redirect(struct net_device *dev, + struct gve_rx_ring *rx, + void *data, int len, + struct bpf_prog *xdp_prog) +{ + struct xdp_buff *xdp; + int err; + + if (rx->xsk_pool->frame_len < len) + return -E2BIG; + xdp = xsk_buff_alloc(rx->xsk_pool); + if (!xdp) { + u64_stats_update_begin(&rx->statss); + rx->xdp_alloc_fails++; + u64_stats_update_end(&rx->statss); + return -ENOMEM; + } + xdp->data_end = xdp->data + len; + memcpy(xdp->data, data, len); + err = xdp_do_redirect(dev, xdp, xdp_prog); + if (err) + xsk_buff_free(xdp); + return err; +} + +static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx, + struct xdp_buff *orig, struct bpf_prog *xdp_prog) +{ + int total_len, len = orig->data_end - orig->data; + int headroom = XDP_PACKET_HEADROOM; + struct xdp_buff new; + void *frame; + int err; + + if (rx->xsk_pool) + return gve_xsk_pool_redirect(dev, rx, orig->data, + len, xdp_prog); + + total_len = headroom + SKB_DATA_ALIGN(len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC); + if (!frame) { + u64_stats_update_begin(&rx->statss); + rx->xdp_alloc_fails++; + u64_stats_update_end(&rx->statss); + return -ENOMEM; + } + xdp_init_buff(&new, total_len, &rx->xdp_rxq); + xdp_prepare_buff(&new, frame, headroom, len, false); + memcpy(new.data, orig->data, len); + + err = xdp_do_redirect(dev, &new, xdp_prog); + if (err) + page_frag_free(frame); + + return err; +} + +static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx, + struct xdp_buff *xdp, struct bpf_prog *xprog, + int xdp_act) +{ + struct gve_tx_ring *tx; + int tx_qid; + int err; + + switch (xdp_act) { + case XDP_ABORTED: + case XDP_DROP: + default: + break; + case XDP_TX: + tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num); + tx = &priv->tx[tx_qid]; + spin_lock(&tx->xdp_lock); + err = gve_xdp_xmit_one(priv, tx, xdp->data, + xdp->data_end - xdp->data, NULL); + spin_unlock(&tx->xdp_lock); + + if (unlikely(err)) { + u64_stats_update_begin(&rx->statss); + rx->xdp_tx_errors++; + u64_stats_update_end(&rx->statss); + } + break; + case XDP_REDIRECT: + err = gve_xdp_redirect(priv->dev, rx, xdp, xprog); + + if (unlikely(err)) { + u64_stats_update_begin(&rx->statss); + rx->xdp_redirect_errors++; + u64_stats_update_end(&rx->statss); + } + break; + } + u64_stats_update_begin(&rx->statss); + if ((u32)xdp_act < GVE_XDP_ACTIONS) + rx->xdp_actions[xdp_act]++; + u64_stats_update_end(&rx->statss); +} + #define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x)) static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, struct gve_rx_desc *desc, u32 idx, @@ -603,9 +707,12 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, union gve_rx_data_slot *data_slot; struct gve_priv *priv = rx->gve; struct sk_buff *skb = NULL; + struct bpf_prog *xprog; + struct xdp_buff xdp; dma_addr_t page_bus; void *va; + u16 len = frag_size; struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; bool is_first_frag = ctx->frag_cnt == 0; @@ -645,9 +752,35 @@ static void gve_rx(struct 
gve_rx_ring *rx, netdev_features_t feat, dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE); page_info->pad = is_first_frag ? GVE_RX_PAD : 0; + len -= page_info->pad; frag_size -= page_info->pad; - skb = gve_rx_skb(priv, rx, page_info, napi, frag_size, + xprog = READ_ONCE(priv->xdp_prog); + if (xprog && is_only_frag) { + void *old_data; + int xdp_act; + + xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq); + xdp_prepare_buff(&xdp, page_info->page_address + + page_info->page_offset, GVE_RX_PAD, + len, false); + old_data = xdp.data; + xdp_act = bpf_prog_run_xdp(xprog, &xdp); + if (xdp_act != XDP_PASS) { + gve_xdp_done(priv, rx, &xdp, xprog, xdp_act); + ctx->total_size += frag_size; + goto finish_ok_pkt; + } + + page_info->pad += xdp.data - old_data; + len = xdp.data_end - xdp.data; + + u64_stats_update_begin(&rx->statss); + rx->xdp_actions[XDP_PASS]++; + u64_stats_update_end(&rx->statss); + } + + skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot, is_only_frag); if (!skb) { u64_stats_update_begin(&rx->statss); @@ -773,6 +906,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, netdev_features_t feat) { + u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT]; + u64 xdp_txs = rx->xdp_actions[XDP_TX]; struct gve_rx_ctx *ctx = &rx->ctx; struct gve_priv *priv = rx->gve; struct gve_rx_cnts cnts = {0}; @@ -820,6 +955,12 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, u64_stats_update_end(&rx->statss); } + if (xdp_txs != rx->xdp_actions[XDP_TX]) + gve_xdp_tx_flush(priv, rx->q_num); + + if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT]) + xdp_do_flush(); + /* restock ring slots */ if (!rx->data.raw_addressing) { /* In QPL mode buffs are refilled as the desc are processed */ diff --git a/google/gve/gve_rx_dqo.c b/google/gve/gve_rx_dqo.c index 630f42a..e57b73e 100644 --- a/google/gve/gve_rx_dqo.c +++ b/google/gve/gve_rx_dqo.c @@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, if (eop && buf_len <= priv->rx_copybreak) { rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, - &buf_state->page_info, buf_len, 0); + &buf_state->page_info, buf_len); if (unlikely(!rx->ctx.skb_head)) goto error; rx->ctx.skb_tail = rx->ctx.skb_head; diff --git a/google/gve/gve_tx.c b/google/gve/gve_tx.c index 4888bf0..e50510b 100644 --- a/google/gve/gve_tx.c +++ b/google/gve/gve_tx.c @@ -11,6 +11,7 @@ #include #include #include +#include static inline void gve_tx_put_doorbell(struct gve_priv *priv, struct gve_queue_resources *q_resources, @@ -19,6 +20,14 @@ static inline void gve_tx_put_doorbell(struct gve_priv *priv, iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]); } +void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid) +{ + u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid); + struct gve_tx_ring *tx = &priv->tx[tx_qid]; + + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); +} + /* gvnic can only transmit from a Registered Segment. * We copy skb payloads into the registered segment before writing Tx * descriptors and ringing the Tx doorbell. 
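Note: the XDP_TX handling in gve_rx() above and the new gve_xdp_tx_flush() form a fill-then-flush pair: each frame is queued on the paired XDP Tx ring under tx->xdp_lock, and the doorbell is only rung once per RX poll (from gve_clean_rx_done()) rather than once per frame. A minimal sketch of that pattern, using only the helpers added in this patch and omitting the per-ring stats updates (illustrative, not driver code):

static void example_gve_xdp_tx(struct gve_priv *priv, struct gve_rx_ring *rx,
			       struct xdp_buff *xdp)
{
	int tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
	struct gve_tx_ring *tx = &priv->tx[tx_qid];
	int err;

	/* xdp_lock serializes XDP_TX, ndo_xdp_xmit and XSK producers */
	spin_lock(&tx->xdp_lock);
	err = gve_xdp_xmit_one(priv, tx, xdp->data,
			       xdp->data_end - xdp->data, NULL);
	spin_unlock(&tx->xdp_lock);
	if (err)
		return;	/* -EBUSY: descriptor ring or copy FIFO is full */

	/* In the driver this happens later, once per gve_clean_rx_done() pass */
	gve_xdp_tx_flush(priv, rx->q_num);
}

Deferring the doorbell write to the end of the RX poll keeps the MMIO write off the per-packet path.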
@@ -132,6 +141,58 @@ static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes) atomic_add(bytes, &fifo->available); } +static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info) +{ + size_t space_freed = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(info->iov); i++) { + space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; + info->iov[i].iov_len = 0; + info->iov[i].iov_padding = 0; + } + return space_freed; +} + +static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, + u32 to_do) +{ + struct gve_tx_buffer_state *info; + u32 clean_end = tx->done + to_do; + u64 pkts = 0, bytes = 0; + size_t space_freed = 0; + u32 xsk_complete = 0; + u32 idx; + + for (; tx->done < clean_end; tx->done++) { + idx = tx->done & tx->mask; + info = &tx->info[idx]; + + if (unlikely(!info->xdp.size)) + continue; + + bytes += info->xdp.size; + pkts++; + xsk_complete += info->xdp.is_xsk; + + info->xdp.size = 0; + if (info->xdp_frame) { + xdp_return_frame(info->xdp_frame); + info->xdp_frame = NULL; + } + space_freed += gve_tx_clear_buffer_state(info); + } + + gve_tx_free_fifo(&tx->tx_fifo, space_freed); + if (xsk_complete > 0 && tx->xsk_pool) + xsk_tx_completed(tx->xsk_pool, xsk_complete); + u64_stats_update_begin(&tx->statss); + tx->bytes_done += bytes; + tx->pkt_done += pkts; + u64_stats_update_end(&tx->statss); + return pkts; +} + static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, u32 to_do, bool try_to_wake); @@ -144,8 +205,12 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx) gve_tx_remove_from_block(priv, idx); slots = tx->mask + 1; - gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); - netdev_tx_reset_queue(tx->netdev_txq); + if (tx->q_num < priv->tx_cfg.num_queues) { + gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); + netdev_tx_reset_queue(tx->netdev_txq); + } else { + gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt); + } dma_free_coherent(hdev, sizeof(*tx->q_resources), tx->q_resources, tx->q_resources_bus); @@ -177,6 +242,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) /* Make sure everything is zeroed to start */ memset(tx, 0, sizeof(*tx)); spin_lock_init(&tx->clean_lock); + spin_lock_init(&tx->xdp_lock); tx->q_num = idx; tx->mask = slots - 1; @@ -195,7 +261,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; tx->dev = &priv->pdev->dev; if (!tx->raw_addressing) { - tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); + tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx); if (!tx->tx_fifo.qpl) goto abort_with_desc; /* map Tx FIFO */ @@ -213,7 +279,8 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx, (unsigned long)tx->bus); - tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); + if (idx < priv->tx_cfg.num_queues) + tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); gve_tx_add_to_block(priv, idx); return 0; @@ -233,12 +300,12 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) return -ENOMEM; } -int gve_tx_alloc_rings(struct gve_priv *priv) +int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings) { int err = 0; int i; - for (i = 0; i < priv->tx_cfg.num_queues; i++) { + for (i = start_id; i < start_id + num_rings; i++) { err = gve_tx_alloc_ring(priv, i); if (err) { netif_err(priv, drv, priv->dev, @@ -251,17 +318,17 @@ int gve_tx_alloc_rings(struct gve_priv *priv) if (err) { int j; - for (j = 0; j < i; j++) + for (j = start_id; 
j < i; j++) gve_tx_free_ring(priv, j); } return err; } -void gve_tx_free_rings_gqi(struct gve_priv *priv) +void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings) { int i; - for (i = 0; i < priv->tx_cfg.num_queues; i++) + for (i = start_id; i < start_id + num_rings; i++) gve_tx_free_ring(priv, i); } @@ -374,18 +441,18 @@ static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, } static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc, - struct sk_buff *skb, bool is_gso, + u16 csum_offset, u8 ip_summed, bool is_gso, int l4_hdr_offset, u32 desc_cnt, - u16 hlen, u64 addr) + u16 hlen, u64 addr, u16 pkt_len) { /* l4_hdr_offset and csum_offset are in units of 16-bit words */ if (is_gso) { pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM; - pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1; + pkt_desc->pkt.l4_csum_offset = csum_offset >> 1; pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1; - } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + } else if (likely(ip_summed == CHECKSUM_PARTIAL)) { pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM; - pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1; + pkt_desc->pkt.l4_csum_offset = csum_offset >> 1; pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1; } else { pkt_desc->pkt.type_flags = GVE_TXD_STD; @@ -393,7 +460,7 @@ static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc, pkt_desc->pkt.l4_hdr_offset = 0; } pkt_desc->pkt.desc_cnt = desc_cnt; - pkt_desc->pkt.len = cpu_to_be16(skb->len); + pkt_desc->pkt.len = cpu_to_be16(pkt_len); pkt_desc->pkt.seg_len = cpu_to_be16(hlen); pkt_desc->pkt.seg_addr = cpu_to_be64(addr); } @@ -412,15 +479,16 @@ static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc, } static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc, - struct sk_buff *skb, bool is_gso, + u16 l3_offset, u16 gso_size, + bool is_gso_v6, bool is_gso, u16 len, u64 addr) { seg_desc->seg.type_flags = GVE_TXD_SEG; if (is_gso) { - if (skb_is_gso_v6(skb)) + if (is_gso_v6) seg_desc->seg.type_flags |= GVE_TXSF_IPV6; - seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1; - seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + seg_desc->seg.l3_offset = l3_offset >> 1; + seg_desc->seg.mss = cpu_to_be16(gso_size); } seg_desc->seg.seg_len = cpu_to_be16(len); seg_desc->seg.seg_addr = cpu_to_be64(addr); @@ -473,9 +541,10 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, &info->iov[payload_iov]); - gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, + gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed, + is_gso, l4_hdr_offset, 1 + mtd_desc_nr + payload_nfrags, hlen, - info->iov[hdr_nfrags - 1].iov_offset); + info->iov[hdr_nfrags - 1].iov_offset, skb->len); skb_copy_bits(skb, 0, tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, @@ -494,7 +563,9 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; seg_desc = &tx->desc[next_idx]; - gve_tx_fill_seg_desc(seg_desc, skb, is_gso, + gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb), + skb_shinfo(skb)->gso_size, + skb_is_gso_v6(skb), is_gso, info->iov[i].iov_len, info->iov[i].iov_offset); @@ -552,8 +623,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, if (mtd_desc_nr) num_descriptors++; - gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, - num_descriptors, 
hlen, addr); + gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed, + is_gso, l4_hdr_offset, + num_descriptors, hlen, addr, skb->len); if (mtd_desc_nr) { idx = (idx + 1) & tx->mask; @@ -569,7 +641,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, addr += hlen; idx = (idx + 1) & tx->mask; seg_desc = &tx->desc[idx]; - gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); + gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb), + skb_shinfo(skb)->gso_size, + skb_is_gso_v6(skb), is_gso, len, addr); } for (i = 0; i < shinfo->nr_frags; i++) { @@ -587,7 +661,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, dma_unmap_len_set(&tx->info[idx], len, len); dma_unmap_addr_set(&tx->info[idx], dma, addr); - gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); + gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb), + skb_shinfo(skb)->gso_size, + skb_is_gso_v6(skb), is_gso, len, addr); } return num_descriptors; @@ -648,6 +724,103 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p, bool is_xsk) +{ + int pad, nfrags, ndescs, iovi, offset; + struct gve_tx_buffer_state *info; + u32 reqi = tx->req; + + pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); + if (pad >= GVE_TX_MAX_HEADER_SIZE) + pad = 0; + info = &tx->info[reqi & tx->mask]; + info->xdp_frame = frame_p; + info->xdp.size = len; + info->xdp.is_xsk = is_xsk; + + nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len, + &info->iov[0]); + iovi = pad > 0; + ndescs = nfrags - iovi; + offset = 0; + + while (iovi < nfrags) { + if (!offset) + gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0, + CHECKSUM_NONE, false, 0, ndescs, + info->iov[iovi].iov_len, + info->iov[iovi].iov_offset, len); + else + gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask], + 0, 0, false, false, + info->iov[iovi].iov_len, + info->iov[iovi].iov_offset); + + memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset, + data + offset, info->iov[iovi].iov_len); + gve_dma_sync_for_device(&priv->pdev->dev, + tx->tx_fifo.qpl->page_buses, + info->iov[iovi].iov_offset, + info->iov[iovi].iov_len); + offset += info->iov[iovi].iov_len; + iovi++; + reqi++; + } + + return ndescs; +} + +int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_tx_ring *tx; + int i, err = 0, qid; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + qid = gve_xdp_tx_queue_id(priv, + smp_processor_id() % priv->num_xdp_queues); + + tx = &priv->tx[qid]; + + spin_lock(&tx->xdp_lock); + for (i = 0; i < n; i++) { + err = gve_xdp_xmit_one(priv, tx, frames[i]->data, + frames[i]->len, frames[i]); + if (err) + break; + } + + if (flags & XDP_XMIT_FLUSH) + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); + + spin_unlock(&tx->xdp_lock); + + u64_stats_update_begin(&tx->statss); + tx->xdp_xmit += n; + tx->xdp_xmit_errors += n - i; + u64_stats_update_end(&tx->statss); + + return i ? 
i : err; +} + +int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p) +{ + int nsegs; + + if (!gve_can_tx(tx, len + GVE_TX_MAX_HEADER_SIZE - 1)) + return -EBUSY; + + nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); + tx->req += nsegs; + + return 0; +} + #define GVE_TX_START_THRESH PAGE_SIZE static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, @@ -657,8 +830,8 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, u64 pkts = 0, bytes = 0; size_t space_freed = 0; struct sk_buff *skb; - int i, j; u32 idx; + int j; for (j = 0; j < to_do; j++) { idx = tx->done & tx->mask; @@ -680,12 +853,7 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, dev_consume_skb_any(skb); if (tx->raw_addressing) continue; - /* FIFO free */ - for (i = 0; i < ARRAY_SIZE(info->iov); i++) { - space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; - info->iov[i].iov_len = 0; - info->iov[i].iov_padding = 0; - } + space_freed += gve_tx_clear_buffer_state(info); } } @@ -720,6 +888,70 @@ u32 gve_tx_load_event_counter(struct gve_priv *priv, return be32_to_cpu(counter); } +static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, + int budget) +{ + struct xdp_desc desc; + int sent = 0, nsegs; + void *data; + + spin_lock(&tx->xdp_lock); + while (sent < budget) { + if (!gve_can_tx(tx, GVE_TX_START_THRESH)) + goto out; + + if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) { + tx->xdp_xsk_done = tx->xdp_xsk_wakeup; + goto out; + } + + data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); + nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); + tx->req += nsegs; + sent++; + } +out: + if (sent > 0) { + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); + xsk_tx_release(tx->xsk_pool); + } + spin_unlock(&tx->xdp_lock); + return sent; +} + +bool gve_xdp_poll(struct gve_notify_block *block, int budget) +{ + struct gve_priv *priv = block->priv; + struct gve_tx_ring *tx = block->tx; + u32 nic_done; + bool repoll; + u32 to_do; + + /* If budget is 0, do all the work */ + if (budget == 0) + budget = INT_MAX; + + /* Find out how much work there is to be done */ + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = min_t(u32, (nic_done - tx->done), budget); + gve_clean_xdp_done(priv, tx, to_do); + repoll = nic_done != tx->done; + + if (tx->xsk_pool) { + int sent = gve_xsk_tx(priv, tx, budget); + + u64_stats_update_begin(&tx->statss); + tx->xdp_xsk_sent += sent; + u64_stats_update_end(&tx->statss); + repoll |= (sent == budget); + if (xsk_uses_need_wakeup(tx->xsk_pool)) + xsk_set_tx_need_wakeup(tx->xsk_pool); + } + + /* If we still have work we want to repoll */ + return repoll; +} + bool gve_tx_poll(struct gve_notify_block *block, int budget) { struct gve_priv *priv = block->priv; diff --git a/google/gve/gve_utils.c b/google/gve/gve_utils.c index 6ba46ad..26e08d7 100644 --- a/google/gve/gve_utils.c +++ b/google/gve/gve_utils.c @@ -49,10 +49,10 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) } struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, - struct gve_rx_slot_page_info *page_info, u16 len, - u16 padding) + struct gve_rx_slot_page_info *page_info, u16 len) { - void *va = page_info->page_address + padding + page_info->page_offset; + void *va = page_info->page_address + page_info->page_offset + + page_info->pad; struct sk_buff *skb; skb = napi_alloc_skb(napi, len); diff --git a/google/gve/gve_utils.h b/google/gve/gve_utils.h index 
7959594..324fd98 100644 --- a/google/gve/gve_utils.h +++ b/google/gve/gve_utils.h @@ -18,8 +18,7 @@ void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx); void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx); struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, - struct gve_rx_slot_page_info *page_info, u16 len, - u16 pad); + struct gve_rx_slot_page_info *page_info, u16 len); /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */ void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info); diff --git a/patches/patch_xdp1.cocci b/patches/patch_xdp1.cocci new file mode 100644 index 0000000..8bde91d --- /dev/null +++ b/patches/patch_xdp1.cocci @@ -0,0 +1,307 @@ +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +#include ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +#include ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +#include ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +#include ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ +static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) +{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +... ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++ return 0; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ +} + +@@ +@@ +static void gve_unreg_xdp_info(struct gve_priv *priv) +{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +... ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ +} + +@@ +@@ +static void gve_drain_page_cache(struct gve_priv *priv) +{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + ... ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) */ +} + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_xsk_pool_enable(struct net_device *dev, + struct xsk_buff_pool *pool, + u16 qid) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_xsk_pool_disable(struct net_device *dev, + u16 qid) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int verify_xdp_configuration(struct net_device *dev) +{ +... 
+} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +identifier gve_netdev_ops; +identifier gve_xdp, gve_xdp_xmit, gve_xsk_wakeup; +@@ +struct net_device_ops gve_netdev_ops = { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + .ndo_bpf = gve_xdp, + .ndo_xdp_xmit = gve_xdp_xmit, + .ndo_xsk_wakeup = gve_xsk_wakeup, ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ +}; + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, + int budget) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +type bool; +identifier gve_xdp_poll, block, budget; +identifier tx, repoll; +@@ +bool gve_xdp_poll(struct gve_notify_block *block, int budget) +{ +... ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + if (tx->xsk_pool) { + ... + } + ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + /* If we still have work we want to repoll */ + return repoll; +} + + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + if (xsk_complete > 0 && tx->xsk_pool) + xsk_tx_completed(tx->xsk_pool, xsk_complete); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p, bool is_xsk) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_remove_xdp_queues(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_add_xdp_queues(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static void gve_free_xdp_qpls(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_alloc_xdp_qpls(struct gve_priv *priv) +{ +... 
+} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static void gve_free_xdp_rings(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_destroy_xdp_rings(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_alloc_xdp_rings(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_create_xdp_rings(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_unregister_xdp_qpls(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_register_xdp_qpls(struct gve_priv *priv) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static void add_napi_init_xdp_sync_stats(struct gve_priv *priv, + int (*napi_poll)(struct napi_struct *napi, + int budget)) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + xdp_return_frame(info->xdp_frame); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ +static void gve_set_netdev_xdp_features(struct gve_priv *priv) +{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) || defined(KUNIT_KERNEL) +... ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) || defined(KUNIT_KERNEL) */ +} diff --git a/patches/patch_xdp2.cocci b/patches/patch_xdp2.cocci new file mode 100644 index 0000000..dae4cd5 --- /dev/null +++ b/patches/patch_xdp2.cocci @@ -0,0 +1,107 @@ +@@ +@@ +struct gve_rx_ring { + ... ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + /* XDP stuff */ + struct xdp_rxq_info xdp_rxq; + struct xdp_rxq_info xsk_rxq; + struct xsk_buff_pool *xsk_pool; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + ... +}; + +@@ +@@ +struct gve_tx_ring { + ... ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + struct xsk_buff_pool *xsk_pool; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + ... +}; + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_xsk_pool_redirect(struct net_device *dev, + struct gve_rx_ring *rx, + void *data, int len, + struct bpf_prog *xdp_prog) +{ +... 
+} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx, + struct xdp_buff *orig, struct bpf_prog *xdp_prog) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + + +@@ +type bool; +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +static bool gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx, + struct xdp_buff *xdp, struct bpf_prog *xprog, + int xdp_act) +{ +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@ assign @ +identifier xprog, xdp; +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + struct bpf_prog *xprog; + struct xdp_buff xdp; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@ gve_rx @ +identifier assign.xprog, assign.xdp; +identifier READ_ONCE; +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) +xprog = READ_ONCE(priv->xdp_prog); +if (xprog && is_only_frag) { +... +} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + + +@ assign2 @ +identifier xdp_redirects, rx; +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT]; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +identifier assign2.xdp_redirects, assign2.rx; +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT]) + xdp_do_flush(); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@ assign3 @ +identifier xdp_txs, rx; +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + u64 xdp_txs = rx->xdp_actions[XDP_TX]; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ + +@@ +identifier assign3.xdp_txs, assign3.rx; +@@ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) + if (xdp_txs != rx->xdp_actions[XDP_TX]) + gve_xdp_tx_flush(priv, rx->q_num); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ From 02d0960126b3f897eb8e279566acff42f399108b Mon Sep 17 00:00:00 2001 From: Praveen Kaligineedi Date: Fri, 14 Apr 2023 15:09:36 -0700 Subject: [PATCH 08/28] Fix coccinelle patches. 
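These semantic patches guard the XDP code paths by kernel version when the out-of-tree driver is generated; the changes below drop the KUNIT_KERNEL clause from those guards and extend the tx_timeout rule to cover Ubuntu backport kernels. As a rough sketch of the code the corrected tx_timeout rule emits (the handler name and the netdev_priv() lookup are assumptions; the guard expression is taken from the rule itself, and the real output comes from running spatch):

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || \
     RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) || \
     UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,1102))
static void gve_tx_timeout(struct net_device *dev, unsigned int queue)
{
	... /* original two-argument handler body, unchanged */
}
#else
/* Older kernels still use the single-argument ndo_tx_timeout signature */
static void backport_gve_tx_timeout(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);

	gve_schedule_reset(priv);
	priv->tx_timeo_cnt++;
}
#endif

The matching net_device_ops rule then points .ndo_tx_timeout at the original handler on new kernels and at the generated backport_ wrapper on old ones.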
--- patches/header/gve_linux_version.h | 3 + patches/ndo_tx_timeout.cocci | 12 +-- patches/patch_xdp1.cocci | 130 ++++++++++++++--------------- patches/patch_xdp2.cocci | 44 +++++----- 4 files changed, 96 insertions(+), 93 deletions(-) diff --git a/patches/header/gve_linux_version.h b/patches/header/gve_linux_version.h index 7d8410c..500c86a 100644 --- a/patches/header/gve_linux_version.h +++ b/patches/header/gve_linux_version.h @@ -33,6 +33,9 @@ #ifndef UTS_UBUNTU_RELEASE_ABI #define UTS_UBUNTU_RELEASE_ABI 0 #define UBUNTU_VERSION_CODE 0 +#elif !defined(CONFIG_VERSION_SIGNATURE) +#undef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 #else #define UBUNTU_VERSION_CODE (((LINUX_VERSION_CODE & ~0xFF) << 8) + (UTS_UBUNTU_RELEASE_ABI)) #endif /* UTS_UBUNTU_RELEASE_ABI */ diff --git a/patches/ndo_tx_timeout.cocci b/patches/ndo_tx_timeout.cocci index 7923530..7fb6f10 100644 --- a/patches/ndo_tx_timeout.cocci +++ b/patches/ndo_tx_timeout.cocci @@ -12,13 +12,13 @@ identifier assigned.tx_timeout; fresh identifier backport = "backport_" ## tx_timeout; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) || UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,1102)) static void tx_timeout(struct net_device *dev, unsigned int queue) { ... } -+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) */ ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) || UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,1102)) */ +static void +backport(struct net_device *dev) +{ @@ -26,7 +26,7 @@ static void tx_timeout(struct net_device *dev, unsigned int queue) + gve_schedule_reset(priv); + priv->tx_timeo_cnt++; +} -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) || UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,1102)) */ @ mod_assignment depends on assigned @ identifier assigned.ndo_struct; @@ -36,10 +36,10 @@ fresh identifier backport = "backport_" ## tx_timeout; struct net_device_ops ndo_struct = { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) || UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,1102)) .ndo_tx_timeout = tx_timeout, -+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) */ ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) || UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,1102) */ + .ndo_tx_timeout = backport, -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3) || UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,1102)) */ }; diff --git a/patches/patch_xdp1.cocci b/patches/patch_xdp1.cocci index 8bde91d..428af68 100644 --- a/patches/patch_xdp1.cocci +++ b/patches/patch_xdp1.cocci @@ -1,46 +1,46 @@ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) #include -+#endif /* (LINUX_VERSION_CODE >= 
KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) #include -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) #include -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) #include -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) ... -+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ + return 0; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ } @@ @@ static void gve_unreg_xdp_info(struct gve_priv *priv) { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) ... -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ } @@ @@ -54,83 +54,83 @@ static void gve_drain_page_cache(struct gve_priv *priv) @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, struct netlink_ext_ack *extack) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_xsk_pool_enable(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_xsk_pool_disable(struct net_device *dev, u16 qid) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) { ... 
} -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int verify_xdp_configuration(struct net_device *dev) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ identifier gve_netdev_ops; identifier gve_xdp, gve_xdp_xmit, gve_xsk_wakeup; @@ struct net_device_ops gve_netdev_ops = { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) .ndo_bpf = gve_xdp, .ndo_xdp_xmit = gve_xdp_xmit, .ndo_xsk_wakeup = gve_xsk_wakeup, -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ }; @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, int budget) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ type bool; @@ -140,12 +140,12 @@ identifier tx, repoll; bool gve_xdp_poll(struct gve_notify_block *block, int budget) { ... -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) if (tx->xsk_pool) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ /* If we still have work we want to repoll */ return repoll; } @@ -153,155 +153,155 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget) @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) if (xsk_complete > 0 && tx->xsk_pool) xsk_tx_completed(tx->xsk_pool, xsk_complete); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, void *data, int len, void *frame_p) { ... 
} -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, void *data, int len, void *frame_p, bool is_xsk) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_remove_xdp_queues(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_add_xdp_queues(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static void gve_free_xdp_qpls(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_alloc_xdp_qpls(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static void gve_free_xdp_rings(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_destroy_xdp_rings(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_alloc_xdp_rings(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_create_xdp_rings(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_unregister_xdp_qpls(struct gve_priv *priv) { ... 
} -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_register_xdp_qpls(struct gve_priv *priv) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static void add_napi_init_xdp_sync_stats(struct gve_priv *priv, int (*napi_poll)(struct napi_struct *napi, int budget)) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) xdp_return_frame(info->xdp_frame); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv) { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) ... -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) */ } diff --git a/patches/patch_xdp2.cocci b/patches/patch_xdp2.cocci index dae4cd5..0787e37 100644 --- a/patches/patch_xdp2.cocci +++ b/patches/patch_xdp2.cocci @@ -2,12 +2,12 @@ @@ struct gve_rx_ring { ... -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) /* XDP stuff */ struct xdp_rxq_info xdp_rxq; struct xdp_rxq_info xsk_rxq; struct xsk_buff_pool *xsk_pool; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ ... }; @@ -15,15 +15,15 @@ struct gve_rx_ring { @@ struct gve_tx_ring { ... -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) struct xsk_buff_pool *xsk_pool; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ ... }; @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_xsk_pool_redirect(struct net_device *dev, struct gve_rx_ring *rx, void *data, int len, @@ -31,77 +31,77 @@ static int gve_xsk_pool_redirect(struct net_device *dev, { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx, struct xdp_buff *orig, struct bpf_prog *xdp_prog) { ... 
} -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ type bool; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) static bool gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx, struct xdp_buff *xdp, struct bpf_prog *xprog, int xdp_act) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @ assign @ identifier xprog, xdp; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) struct bpf_prog *xprog; struct xdp_buff xdp; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @ gve_rx @ identifier assign.xprog, assign.xdp; identifier READ_ONCE; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) xprog = READ_ONCE(priv->xdp_prog); if (xprog && is_only_frag) { ... } -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @ assign2 @ identifier xdp_redirects, rx; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT]; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ identifier assign2.xdp_redirects, assign2.rx; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT]) xdp_do_flush(); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @ assign3 @ identifier xdp_txs, rx; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) u64 xdp_txs = rx->xdp_actions[XDP_TX]; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ @@ identifier assign3.xdp_txs, assign3.rx; @@ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) if (xdp_txs != rx->xdp_actions[XDP_TX]) gve_xdp_tx_flush(priv, rx->q_num); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) */ From cbdc45b2c38153ab50fa0eb0d1dd597d815080f1 Mon Sep 17 00:00:00 2001 From: Shailend Chand Date: Fri, 5 May 2023 14:52:56 -0700 Subject: [PATCH 09/28] gve: Secure enough bytes in the first TX desc for all TCP pkts Non-GSO TCP packets whose SKBs' linear portion did not include the entire TCP header were not populating the first Tx descriptor with as many bytes as the vNIC expected. This change ensures that all TCP packets populate the first descriptor with the correct number of bytes. 
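Concretely, for the copy-mode (QPL) path the number of bytes secured in the first descriptor now follows the logic sketched below, built from the constant and helpers introduced in the diff that follows (illustrative, not a verbatim excerpt):

/* Bytes the first Tx descriptor must cover after this change.
 * GVE_GQ_TX_MIN_PKT_DESC_BYTES (182) is the minimum the vNIC expects
 * for a non-GSO packet, capped at the packet length.
 */
static int example_first_desc_bytes(const struct sk_buff *skb)
{
	if (skb_is_gso(skb))
		return skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
	return min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
}

So a 1000-byte non-GSO TCP packet whose skb carries only a few dozen linear bytes still has 182 bytes copied into the first segment (skb_copy_bits() pulls the remainder from the frags), while a 66-byte ACK simply contributes the whole packet.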
Fixes: 893ce44df565 ("gve: Add basic driver framework for Compute Engine Virtual NIC") Signed-off-by: Shailend Chand Signed-off-by: Jakub Kicinski --- google/gve/gve.h | 2 +- google/gve/gve_tx.c | 16 +++++++--------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index e214b51..98eb78d 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -49,7 +49,7 @@ #define GVE_XDP_ACTIONS 5 -#define GVE_TX_MAX_HEADER_SIZE 182 +#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ struct gve_rx_desc_queue { diff --git a/google/gve/gve_tx.c b/google/gve/gve_tx.c index e50510b..813da57 100644 --- a/google/gve/gve_tx.c +++ b/google/gve/gve_tx.c @@ -351,8 +351,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, int bytes; int hlen; - hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + - tcp_hdrlen(skb) : skb_headlen(skb); + hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) : + min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len); pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); @@ -522,13 +522,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st pkt_desc = &tx->desc[idx]; l4_hdr_offset = skb_checksum_start_offset(skb); - /* If the skb is gso, then we want the tcp header in the first segment - * otherwise we want the linear portion of the skb (which will contain - * the checksum because skb->csum_start and skb->csum_offset are given - * relative to skb->head) in the first segment. + /* If the skb is gso, then we want the tcp header alone in the first segment + * otherwise we want the minimum required by the gVNIC spec. */ hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : - skb_headlen(skb); + min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len); info->skb = skb; /* We don't want to split the header, so if necessary, pad to the end @@ -732,7 +730,7 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, u32 reqi = tx->req; pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); - if (pad >= GVE_TX_MAX_HEADER_SIZE) + if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES) pad = 0; info = &tx->info[reqi & tx->mask]; info->xdp_frame = frame_p; @@ -812,7 +810,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, { int nsegs; - if (!gve_can_tx(tx, len + GVE_TX_MAX_HEADER_SIZE - 1)) + if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1)) return -EBUSY; nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); From 841a847abe76e0613212fa409a244f42a7118dcb Mon Sep 17 00:00:00 2001 From: Jian Yang Date: Wed, 7 Dec 2022 15:13:52 -0800 Subject: [PATCH 10/28] Enable get_link_ksettings_func on CentOS 7 Signed-off-by: Jian Yang --- patches/ethtool_link_ksettings.cocci | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/patches/ethtool_link_ksettings.cocci b/patches/ethtool_link_ksettings.cocci index f8aaa65..661c8e2 100644 --- a/patches/ethtool_link_ksettings.cocci +++ b/patches/ethtool_link_ksettings.cocci @@ -4,19 +4,19 @@ identifier gve_ops, get_link_ksettings_func; const struct ethtool_ops gve_ops = { .set_priv_flags = gve_set_priv_flags, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 9) .get_link_ksettings = get_link_ksettings_func -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */ ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) || 
RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 9) */ }; @ function depends on remove_field @ identifier remove_field.get_link_ksettings_func, net_device, ethtool_link_ksettings, netdev, cmd; @@ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 9) static int get_link_ksettings_func(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { ... } -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */ ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 9) */ From 18ec5d0c64af1a2c8bfb56a146f9eccd476c41c5 Mon Sep 17 00:00:00 2001 From: Praveen Kaligineedi Date: Thu, 1 Jun 2023 13:59:56 -0700 Subject: [PATCH 11/28] Bump version to 1.4.0rc2 --- google/gve/gve_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 19994f2..bf26372 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -26,7 +26,7 @@ #define GVE_DEFAULT_RX_COPYBREAK (256) #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK) -#define GVE_VERSION "1.4.0rc1" +#define GVE_VERSION "1.4.0rc2" #define GVE_VERSION_PREFIX "GVE-" // Minimum amount of time between queue kicks in msec (10 seconds) From a34c13f8fd802c3fccc56cbcff2fdeecd91f7c56 Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Thu, 25 May 2023 15:26:19 -0700 Subject: [PATCH 12/28] Add header split support - Add buffer size device option which can receive device's packet buffer size and header buffer size - Add header-split and strict-header-split priv flags - Add header split data path to use dma_pool for header buffers - Add max-rx-buffer-size priv flag to allow user to switch the packet buffer size between max and default(e.g. 4K <-> 2K) --- google/gve/gve.h | 50 +++++++++++++++++ google/gve/gve_adminq.c | 85 +++++++++++++++++++++++++---- google/gve/gve_adminq.h | 19 ++++++- google/gve/gve_ethtool.c | 99 ++++++++++++++++++++++++--------- google/gve/gve_main.c | 42 ++++++++------ google/gve/gve_rx_dqo.c | 115 +++++++++++++++++++++++++++++++++++---- google/gve/gve_utils.c | 16 ++++-- google/gve/gve_utils.h | 3 + 8 files changed, 361 insertions(+), 68 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index 98eb78d..315eef2 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -46,6 +46,12 @@ #define GVE_NUM_PTYPES 1024 #define GVE_RX_BUFFER_SIZE_DQO 2048 +#define GVE_MIN_RX_BUFFER_SIZE 2048 +#define GVE_MAX_RX_BUFFER_SIZE 4096 + +#define GVE_HEADER_BUFFER_SIZE_MIN 64 +#define GVE_HEADER_BUFFER_SIZE_MAX 256 +#define GVE_HEADER_BUFFER_SIZE_DEFAULT 128 #define GVE_XDP_ACTIONS 5 @@ -124,6 +130,11 @@ struct gve_rx_compl_queue_dqo { u32 mask; /* Mask for indices to the size of the ring */ }; +struct gve_header_buf { + u8 *data; + dma_addr_t addr; +}; + /* Stores state for tracking buffers posted to HW */ struct gve_rx_buf_state_dqo { /* The page posted to HW. */ @@ -137,6 +148,9 @@ struct gve_rx_buf_state_dqo { */ u32 last_single_ref_offset; + /* Pointer to the header buffer when header-split is active */ + struct gve_header_buf *hdr_buf; + /* Linked list index to next element in the list, or -1 if none */ s16 next; }; @@ -217,19 +231,27 @@ struct gve_rx_ring { * which cannot be reused yet. 
*/ struct gve_index_list used_buf_states; + + /* Array of buffers for header-split */ + struct gve_header_buf *hdr_bufs; } dqo; }; u64 rbytes; /* free-running bytes received */ + u64 rheader_bytes; /* free-running header bytes received */ u64 rpackets; /* free-running packets received */ u32 cnt; /* free-running total number of completed packets */ u32 fill_cnt; /* free-running total number of descs and buffs posted */ u32 mask; /* masks the cnt and fill_cnt to the size of the ring */ + u64 rx_hsplit_pkt; /* free-running packets with headers split */ + u64 rx_hsplit_hbo_pkt; /* free-running packets with header buffer overflow */ u64 rx_copybreak_pkt; /* free-running count of copybreak packets */ u64 rx_copied_pkt; /* free-running total number of copied packets */ u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */ u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */ + /* free-running count of packets dropped by header-split overflow */ + u64 rx_hsplit_err_dropped_pkt; u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */ u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */ @@ -614,6 +636,7 @@ struct gve_priv { u64 stats_report_len; dma_addr_t stats_report_bus; /* dma address for the stats report */ unsigned long ethtool_flags; + unsigned long ethtool_defaults; /* default flags */ unsigned long stats_report_timer_period; struct timer_list stats_report_timer; @@ -627,12 +650,20 @@ struct gve_priv { /* Must be a power of two. */ int data_buffer_size_dqo; + int dev_max_rx_buffer_size; /* The max rx buffer size that device support*/ enum gve_queue_format queue_format; /* Interrupt coalescing settings */ u32 tx_coalesce_usecs; u32 rx_coalesce_usecs; + + /* The size of buffers to allocate for the headers. + * A non-zero value enables header-split. 
+ */ + u16 header_buf_size; + u8 header_split_strict; + struct dma_pool *header_buf_pool; }; enum gve_service_task_flags_bit { @@ -651,8 +682,17 @@ enum gve_state_flags_bit { enum gve_ethtool_flags_bit { GVE_PRIV_FLAGS_REPORT_STATS = 0, + GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT = 1, + GVE_PRIV_FLAGS_ENABLE_STRICT_HEADER_SPLIT = 2, + GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE = 3, }; +#define GVE_PRIV_FLAGS_MASK \ + (BIT(GVE_PRIV_FLAGS_REPORT_STATS) | \ + BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT) | \ + BIT(GVE_PRIV_FLAGS_ENABLE_STRICT_HEADER_SPLIT) | \ + BIT(GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE)) + static inline bool gve_get_do_reset(struct gve_priv *priv) { return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); @@ -786,6 +826,16 @@ static inline void gve_clear_report_stats(struct gve_priv *priv) clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); } +static inline bool gve_get_enable_header_split(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT, &priv->ethtool_flags); +} + +static inline bool gve_get_enable_max_rx_buffer_size(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE, &priv->ethtool_flags); +} + /* Returns the address of the ntfy_blocks irq doorbell */ static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv, diff --git a/google/gve/gve_adminq.c b/google/gve/gve_adminq.c index 1a122ef..890fd86 100644 --- a/google/gve/gve_adminq.c +++ b/google/gve/gve_adminq.c @@ -39,7 +39,8 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, - struct gve_device_option_jumbo_frames **dev_op_jumbo_frames) + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, + struct gve_device_option_buffer_sizes **dev_op_buffer_sizes) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); u16 option_length = be16_to_cpu(option->option_length); @@ -130,6 +131,26 @@ void gve_parse_device_option(struct gve_priv *priv, } *dev_op_jumbo_frames = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_BUFFER_SIZES: + if (option_length < sizeof(**dev_op_buffer_sizes) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Buffer Sizes", + (int)sizeof(**dev_op_buffer_sizes), + GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_buffer_sizes)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "Buffer Sizes"); + } + *dev_op_buffer_sizes = (void *)(option + 1); + if ((*dev_op_buffer_sizes)->header_buffer_size) + priv->ethtool_defaults |= BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT); + break; default: /* If we don't recognize the option just continue * without doing anything. 
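The Buffer Sizes handling above follows the same validation pattern used for the other device options: the option must be at least as long as the structure the driver knows how to parse and must carry the expected required-features mask, while a longer-than-expected option is tolerated (with a warning) for forward compatibility. The following is a minimal, self-contained sketch of that pattern, not the driver code itself; opt_hdr and opt_buffer_sizes are simplified stand-ins for the driver's structures, and the be16/be32 byte-order conversions are omitted for brevity.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the device-option layout; not the driver's types. */
struct opt_hdr {
	uint16_t option_id;
	uint16_t option_length;          /* bytes following this header */
	uint32_t required_features_mask;
};

struct opt_buffer_sizes {
	uint32_t supported_features_mask;
	uint16_t packet_buffer_size;
	uint16_t header_buffer_size;
};

/* Returns the option payload if it is usable, NULL if it must be skipped. */
const struct opt_buffer_sizes *
parse_buffer_sizes(const struct opt_hdr *hdr, uint32_t req_feat_mask_supported)
{
	if (hdr->option_length < sizeof(struct opt_buffer_sizes) ||
	    hdr->required_features_mask != req_feat_mask_supported) {
		fprintf(stderr, "Buffer Sizes option malformed, ignoring\n");
		return NULL;
	}
	if (hdr->option_length > sizeof(struct opt_buffer_sizes))
		fprintf(stderr, "Buffer Sizes option larger than expected\n");
	return (const struct opt_buffer_sizes *)(hdr + 1);
}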
@@ -146,7 +167,8 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, - struct gve_device_option_jumbo_frames **dev_op_jumbo_frames) + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, + struct gve_device_option_buffer_sizes **dev_op_buffer_sizes) { const int num_options = be16_to_cpu(descriptor->num_device_options); struct gve_device_option *dev_opt; @@ -166,7 +188,8 @@ gve_process_device_options(struct gve_priv *priv, gve_parse_device_option(priv, descriptor, dev_opt, dev_op_gqi_rda, dev_op_gqi_qpl, - dev_op_dqo_rda, dev_op_jumbo_frames); + dev_op_dqo_rda, dev_op_jumbo_frames, + dev_op_buffer_sizes); dev_opt = next_opt; } @@ -567,6 +590,9 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries); cmd.create_rx_queue.enable_rsc = !!(priv->dev->features & NETIF_F_LRO); + if (gve_get_enable_header_split(priv)) + cmd.create_rx_queue.header_buffer_size = + cpu_to_be16(priv->header_buf_size); } return gve_adminq_issue_cmd(priv, &cmd); @@ -684,12 +710,15 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv, return 0; } -static void gve_enable_supported_features(struct gve_priv *priv, - u32 supported_features_mask, - const struct gve_device_option_jumbo_frames - *dev_op_jumbo_frames) +static void gve_enable_supported_features( + struct gve_priv *priv, + u32 supported_features_mask, + const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames, + const struct gve_device_option_buffer_sizes *dev_op_buffer_sizes) { - /* Before control reaches this point, the page-size-capped max MTU from + int buf_size; + + /* Before control reaches this point, the page-size-capped max MTU in * the gve_device_descriptor field has already been stored in * priv->dev->max_mtu. We overwrite it with the true max MTU below. 
*/ @@ -699,10 +728,44 @@ static void gve_enable_supported_features(struct gve_priv *priv, "JUMBO FRAMES device option enabled.\n"); priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu); } + + priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; + priv->dev_max_rx_buffer_size = GVE_RX_BUFFER_SIZE_DQO; + priv->header_buf_size = 0; + + if (dev_op_buffer_sizes && + (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) { + dev_info(&priv->pdev->dev, + "BUFFER SIZES device option enabled.\n"); + buf_size = be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size); + if (buf_size) { + priv->dev_max_rx_buffer_size = buf_size; + if (priv->dev_max_rx_buffer_size & + (priv->dev_max_rx_buffer_size - 1)) + priv->dev_max_rx_buffer_size = GVE_RX_BUFFER_SIZE_DQO; + if (priv->dev_max_rx_buffer_size < GVE_MIN_RX_BUFFER_SIZE) + priv->dev_max_rx_buffer_size = GVE_MIN_RX_BUFFER_SIZE; + if (priv->dev_max_rx_buffer_size > GVE_MAX_RX_BUFFER_SIZE) + priv->dev_max_rx_buffer_size = GVE_MAX_RX_BUFFER_SIZE; + } + buf_size = be16_to_cpu(dev_op_buffer_sizes->header_buffer_size); + if (buf_size) { + priv->header_buf_size = buf_size; + if (priv->header_buf_size & (priv->header_buf_size - 1)) + priv->header_buf_size = + GVE_HEADER_BUFFER_SIZE_DEFAULT; + if (priv->header_buf_size < GVE_HEADER_BUFFER_SIZE_MIN) + priv->header_buf_size = GVE_HEADER_BUFFER_SIZE_MIN; + if (priv->header_buf_size > GVE_HEADER_BUFFER_SIZE_MAX) + priv->header_buf_size = GVE_HEADER_BUFFER_SIZE_MAX; + } + } + } int gve_adminq_describe_device(struct gve_priv *priv) { + struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL; @@ -733,7 +796,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda, &dev_op_gqi_qpl, &dev_op_dqo_rda, - &dev_op_jumbo_frames); + &dev_op_jumbo_frames, + &dev_op_buffer_sizes); if (err) goto free_device_descriptor; @@ -798,7 +862,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); gve_enable_supported_features(priv, supported_features_mask, - dev_op_jumbo_frames); + dev_op_jumbo_frames, + dev_op_buffer_sizes); free_device_descriptor: dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor, diff --git a/google/gve/gve_adminq.h b/google/gve/gve_adminq.h index f894beb..89407d2 100644 --- a/google/gve/gve_adminq.h +++ b/google/gve/gve_adminq.h @@ -117,6 +117,14 @@ struct gve_device_option_jumbo_frames { static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8); +struct gve_device_option_buffer_sizes { + __be32 supported_features_mask; + __be16 packet_buffer_size; + __be16 header_buffer_size; +}; + +static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -131,6 +139,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_GQI_QPL = 0x3, GVE_DEV_OPT_ID_DQO_RDA = 0x4, GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, + GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, }; enum gve_dev_opt_req_feat_mask { @@ -139,10 +148,12 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, }; enum gve_sup_feature_mask { GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, + GVE_SUP_BUFFER_SIZES_MASK 
= 1 << 4, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -155,6 +166,7 @@ enum gve_driver_capbility { gve_driver_capability_dqo_qpl = 2, /* reserved for future use */ gve_driver_capability_dqo_rda = 3, gve_driver_capability_alt_miss_compl = 4, + gve_driver_capability_flexible_buffer_size = 5, }; #define GVE_CAP1(a) BIT((int)a) @@ -166,7 +178,8 @@ enum gve_driver_capbility { (GVE_CAP1(gve_driver_capability_gqi_qpl) | \ GVE_CAP1(gve_driver_capability_gqi_rda) | \ GVE_CAP1(gve_driver_capability_dqo_rda) | \ - GVE_CAP1(gve_driver_capability_alt_miss_compl)) + GVE_CAP1(gve_driver_capability_alt_miss_compl) | \ + GVE_CAP1(gve_driver_capability_flexible_buffer_size)) #define GVE_DRIVER_CAPABILITY_FLAGS2 0x0 #define GVE_DRIVER_CAPABILITY_FLAGS3 0x0 @@ -249,7 +262,9 @@ struct gve_adminq_create_rx_queue { __be16 packet_buffer_size; __be16 rx_buff_ring_size; u8 enable_rsc; - u8 padding[5]; + u8 padding1; + __be16 header_buffer_size; + u8 padding2[2]; }; static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56); diff --git a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index cfd4b8d..38f2e1a 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -40,15 +40,17 @@ static u32 gve_get_msglevel(struct net_device *netdev) * as declared in enum xdp_action inside file uapi/linux/bpf.h . */ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { - "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", - "rx_dropped", "tx_dropped", "tx_timeouts", + "rx_packets", "rx_packets_sph", "rx_packets_hbo", "tx_packets", + "rx_bytes", "tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts", "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt", + "rx_hsplit_err_dropped_pkt", "interface_up_cnt", "interface_down_cnt", "reset_cnt", "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt", }; static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { - "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]", + "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", + "rx_bytes[%u]", "rx_header_bytes[%u]", "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]", "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", @@ -77,7 +79,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { }; static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { - "report-stats", + "report-stats", "enable-header-split", "enable-strict-header-split", + "enable-max-rx-buffer-size" }; #define GVE_MAIN_STATS_LEN ARRAY_SIZE(gve_gstrings_main_stats) @@ -154,11 +157,13 @@ static void gve_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { - u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, - tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt, + u64 tmp_rx_pkts, tmp_rx_pkts_sph, tmp_rx_pkts_hbo, tmp_rx_bytes, + tmp_rx_hbytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail, + tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes; - u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts, - rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped; + u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_err_dropped_pkt, + rx_pkts, rx_pkts_sph, rx_pkts_hbo, rx_skb_alloc_fail, rx_bytes, + tx_pkts, tx_bytes, tx_dropped; int stats_idx, base_stats_idx, max_stats_idx; struct stats *report_stats; int *rx_qid_to_stats_idx; @@ -185,8 +190,10 @@ gve_get_ethtool_stats(struct net_device *netdev, 
kfree(rx_qid_to_stats_idx); return; } - for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0, - rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0; + for (rx_pkts = 0, rx_bytes = 0, rx_pkts_sph = 0, rx_pkts_hbo = 0, + rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0, + rx_desc_err_dropped_pkt = 0, rx_hsplit_err_dropped_pkt = 0, + ring = 0; ring < priv->rx_cfg.num_queues; ring++) { if (priv->rx) { do { @@ -195,18 +202,25 @@ gve_get_ethtool_stats(struct net_device *netdev, start = u64_stats_fetch_begin(&priv->rx[ring].statss); tmp_rx_pkts = rx->rpackets; + tmp_rx_pkts_sph = rx->rx_hsplit_pkt; + tmp_rx_pkts_hbo = rx->rx_hsplit_hbo_pkt; tmp_rx_bytes = rx->rbytes; tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; tmp_rx_desc_err_dropped_pkt = rx->rx_desc_err_dropped_pkt; + tmp_rx_hsplit_err_dropped_pkt = + rx->rx_hsplit_err_dropped_pkt; } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); rx_pkts += tmp_rx_pkts; + rx_pkts_sph += tmp_rx_pkts_sph; + rx_pkts_hbo += tmp_rx_pkts_hbo; rx_bytes += tmp_rx_bytes; rx_skb_alloc_fail += tmp_rx_skb_alloc_fail; rx_buf_alloc_fail += tmp_rx_buf_alloc_fail; rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt; + rx_hsplit_err_dropped_pkt += tmp_rx_hsplit_err_dropped_pkt; } } for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; @@ -227,6 +241,8 @@ gve_get_ethtool_stats(struct net_device *netdev, i = 0; data[i++] = rx_pkts; + data[i++] = rx_pkts_sph; + data[i++] = rx_pkts_hbo; data[i++] = tx_pkts; data[i++] = rx_bytes; data[i++] = tx_bytes; @@ -238,6 +254,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = rx_skb_alloc_fail; data[i++] = rx_buf_alloc_fail; data[i++] = rx_desc_err_dropped_pkt; + data[i++] = rx_hsplit_err_dropped_pkt; data[i++] = priv->interface_up_cnt; data[i++] = priv->interface_down_cnt; data[i++] = priv->reset_cnt; @@ -277,6 +294,7 @@ gve_get_ethtool_stats(struct net_device *netdev, start = u64_stats_fetch_begin(&priv->rx[ring].statss); tmp_rx_bytes = rx->rbytes; + tmp_rx_hbytes = rx->rheader_bytes; tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; tmp_rx_desc_err_dropped_pkt = @@ -284,6 +302,7 @@ gve_get_ethtool_stats(struct net_device *netdev, } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); data[i++] = tmp_rx_bytes; + data[i++] = tmp_rx_hbytes; data[i++] = rx->rx_cont_packet_cnt; data[i++] = rx->rx_frag_flip_cnt; data[i++] = rx->rx_frag_copy_cnt; @@ -535,30 +554,47 @@ static int gve_set_tunable(struct net_device *netdev, static u32 gve_get_priv_flags(struct net_device *netdev) { struct gve_priv *priv = netdev_priv(netdev); - u32 ret_flags = 0; - - /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. 
*/ - if (priv->ethtool_flags & BIT(0)) - ret_flags |= BIT(0); - return ret_flags; + return priv->ethtool_flags & GVE_PRIV_FLAGS_MASK; } static int gve_set_priv_flags(struct net_device *netdev, u32 flags) { struct gve_priv *priv = netdev_priv(netdev); - u64 ori_flags, new_flags; + bool need_adjust_queues = false; + u64 ori_flags, flag_diff; int num_tx_queues; + /* If turning off header split, strict header split will be turned off too*/ + if (gve_get_enable_header_split(priv) && + !(flags & BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT))) { + flags &= ~BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT); + flags &= ~BIT(GVE_PRIV_FLAGS_ENABLE_STRICT_HEADER_SPLIT); + } + + /* If strict header-split is requested, turn on regular header-split */ + if (flags & BIT(GVE_PRIV_FLAGS_ENABLE_STRICT_HEADER_SPLIT)) + flags |= BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT); + + /* Make sure header-split is available */ + if ((flags & BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT)) && + !(priv->ethtool_defaults & BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT))) { + dev_err(&priv->pdev->dev, + "Header-split not available\n"); + return -EINVAL; + } + + if ((flags & BIT(GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE)) && + priv->dev_max_rx_buffer_size <= GVE_MIN_RX_BUFFER_SIZE) { + dev_err(&priv->pdev->dev, + "Max-rx-buffer-size not available\n"); + return -EINVAL; + } + num_tx_queues = gve_num_tx_queues(priv); ori_flags = READ_ONCE(priv->ethtool_flags); - new_flags = ori_flags; - - /* Only one priv flag exists: report-stats (BIT(0))*/ - if (flags & BIT(0)) - new_flags |= BIT(0); - else - new_flags &= ~(BIT(0)); - priv->ethtool_flags = new_flags; + + priv->ethtool_flags = flags & GVE_PRIV_FLAGS_MASK; + /* start report-stats timer when user turns report stats on. */ if (flags & BIT(0)) { mod_timer(&priv->stats_report_timer, @@ -577,6 +613,19 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags) sizeof(struct stats)); del_timer_sync(&priv->stats_report_timer); } + priv->header_split_strict = + (priv->ethtool_flags & + BIT(GVE_PRIV_FLAGS_ENABLE_STRICT_HEADER_SPLIT)) ? true : false; + + flag_diff = priv->ethtool_flags ^ ori_flags; + + if ((flag_diff & BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT)) || + (flag_diff & BIT(GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE))) + need_adjust_queues = true; + + if (need_adjust_queues) + return gve_adjust_queues(priv, priv->rx_cfg, priv->tx_cfg); + return 0; } diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index bf26372..dfb4b8d 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -1309,12 +1309,6 @@ static int gve_open(struct net_device *dev) if (err) goto reset; - if (!gve_is_gqi(priv)) { - /* Hard code this for now. This may be tuned in the future for - * performance. 
- */ - priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; - } err = gve_create_rings(priv); if (err) goto reset; @@ -1687,36 +1681,51 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +static int gve_adjust_queue_count(struct gve_priv *priv, + struct gve_queue_config new_rx_config, + struct gve_queue_config new_tx_config) +{ + int err = 0; + + priv->rx_cfg = new_rx_config; + priv->tx_cfg = new_tx_config; + + if (gve_get_enable_max_rx_buffer_size(priv)) + priv->data_buffer_size_dqo = GVE_MAX_RX_BUFFER_SIZE; + else + priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; + + return err; +} + int gve_adjust_queues(struct gve_priv *priv, struct gve_queue_config new_rx_config, struct gve_queue_config new_tx_config) { int err; - if (netif_carrier_ok(priv->dev)) { /* To make this process as simple as possible we teardown the * device, set the new configuration, and then bring the device * up again. */ err = gve_close(priv->dev); - /* we have already tried to reset in close, - * just fail at this point + /* We have already tried to reset in close, just fail at this + * point. */ if (err) return err; - priv->tx_cfg = new_tx_config; - priv->rx_cfg = new_rx_config; - + err = gve_adjust_queue_count(priv, new_rx_config, new_tx_config); + if (err) + goto err; err = gve_open(priv->dev); if (err) goto err; - return 0; } /* Set the config for the next up. */ - priv->tx_cfg = new_tx_config; - priv->rx_cfg = new_rx_config; - + err = gve_adjust_queue_count(priv, new_rx_config, new_tx_config); + if (err) + goto err; return 0; err: netif_err(priv, drv, priv->dev, @@ -2277,6 +2286,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->service_task_flags = 0x0; priv->state_flags = 0x0; priv->ethtool_flags = 0x0; + priv->ethtool_defaults = 0x0; gve_set_probe_in_progress(priv); priv->gve_wq = alloc_ordered_workqueue("gve", 0); diff --git a/google/gve/gve_rx_dqo.c b/google/gve/gve_rx_dqo.c index e57b73e..f18a35a 100644 --- a/google/gve/gve_rx_dqo.c +++ b/google/gve/gve_rx_dqo.c @@ -109,6 +109,13 @@ static void gve_enqueue_buf_state(struct gve_rx_ring *rx, } } +static void gve_recycle_buf(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + buf_state->hdr_buf = NULL; + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); +} + static struct gve_rx_buf_state_dqo * gve_get_recycled_buf_state(struct gve_rx_ring *rx) { @@ -218,6 +225,16 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) kvfree(rx->dqo.buf_states); rx->dqo.buf_states = NULL; + if (rx->dqo.hdr_bufs) { + for (i = 0; i < buffer_queue_slots; i++) + if (rx->dqo.hdr_bufs[i].data) + dma_pool_free(priv->header_buf_pool, + rx->dqo.hdr_bufs[i].data, + rx->dqo.hdr_bufs[i].addr); + kvfree(rx->dqo.hdr_bufs); + rx->dqo.hdr_bufs = NULL; + } + netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); } @@ -250,6 +267,23 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) if (!rx->dqo.buf_states) return -ENOMEM; + /* Allocate header buffers for header-split */ + if (priv->header_buf_pool) { + rx->dqo.hdr_bufs = kvcalloc(buffer_queue_slots, + sizeof(rx->dqo.hdr_bufs[0]), + GFP_KERNEL); + if (!rx->dqo.hdr_bufs) + goto err; + for (i = 0; i < buffer_queue_slots; i++) { + rx->dqo.hdr_bufs[i].data = + dma_pool_alloc(priv->header_buf_pool, + GFP_KERNEL, + &rx->dqo.hdr_bufs[i].addr); + if (!rx->dqo.hdr_bufs[i].data) + goto err; + } + } + /* Set up linked list of buffer IDs */ for (i = 0; i < rx->dqo.num_buf_states - 1; i++) rx->dqo.buf_states[i].next = i + 1; 
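The header-buffer allocation in gve_rx_alloc_ring_dqo() above pairs one DMA-pool buffer with every buffer-queue slot, and the matching free path releases each slot back to the pool; the pool itself is created once for all rings at the negotiated header buffer size and destroyed only after every ring has been freed. Below is a condensed, kernel-style sketch of that per-ring lifecycle, assuming a simplified stand-in structure (hdr_ring) rather than the driver's struct gve_rx_ring, with error unwinding left to the caller.

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative stand-in for the per-ring header-buffer state; the real
 * driver keeps the equivalent fields inside struct gve_rx_ring. */
struct hdr_ring {
	struct { u8 *data; dma_addr_t addr; } *bufs;
	unsigned int slots;
};

static int hdr_ring_alloc(struct dma_pool *pool, struct hdr_ring *ring,
			  unsigned int slots)
{
	unsigned int i;

	ring->bufs = kvcalloc(slots, sizeof(*ring->bufs), GFP_KERNEL);
	if (!ring->bufs)
		return -ENOMEM;
	ring->slots = slots;

	/* One header buffer per buffer-queue slot, all carved from one pool. */
	for (i = 0; i < slots; i++) {
		ring->bufs[i].data = dma_pool_alloc(pool, GFP_KERNEL,
						    &ring->bufs[i].addr);
		if (!ring->bufs[i].data)
			return -ENOMEM;	/* caller unwinds via hdr_ring_free() */
	}
	return 0;
}

static void hdr_ring_free(struct dma_pool *pool, struct hdr_ring *ring)
{
	unsigned int i;

	/* Return every slot to the pool before dma_pool_destroy() is called. */
	for (i = 0; i < ring->slots; i++)
		if (ring->bufs[i].data)
			dma_pool_free(pool, ring->bufs[i].data,
				      ring->bufs[i].addr);
	kvfree(ring->bufs);
	ring->bufs = NULL;
	ring->slots = 0;
}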
@@ -300,7 +334,18 @@ void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx) int gve_rx_alloc_rings_dqo(struct gve_priv *priv) { int err = 0; - int i; + int i = 0; + + if (gve_get_enable_header_split(priv)) { + priv->header_buf_pool = dma_pool_create("header_bufs", + &priv->pdev->dev, + priv->header_buf_size, + 64, 0); + if (!priv->header_buf_pool) { + err = -ENOMEM; + goto err; + } + } for (i = 0; i < priv->rx_cfg.num_queues; i++) { err = gve_rx_alloc_ring_dqo(priv, i); @@ -327,6 +372,9 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv) for (i = 0; i < priv->rx_cfg.num_queues; i++) gve_rx_free_ring_dqo(priv, i); + + dma_pool_destroy(priv->header_buf_pool); + priv->header_buf_pool = NULL; } void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) @@ -364,6 +412,12 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); desc->buf_addr = cpu_to_le64(buf_state->addr + buf_state->page_info.page_offset); + if (rx->dqo.hdr_bufs) { + struct gve_header_buf *hdr_buf = + &rx->dqo.hdr_bufs[bufq->tail]; + buf_state->hdr_buf = hdr_buf; + desc->header_buf_addr = cpu_to_le64(hdr_buf->addr); + } bufq->tail = (bufq->tail + 1) & bufq->mask; complq->num_free_slots--; @@ -410,7 +464,7 @@ static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, goto mark_used; } - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + gve_recycle_buf(rx, buf_state); return; mark_used: @@ -520,10 +574,13 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, int queue_idx) { const u16 buffer_id = le16_to_cpu(compl_desc->buf_id); + const bool hbo = compl_desc->header_buffer_overflow != 0; const bool eop = compl_desc->end_of_packet != 0; + const bool sph = compl_desc->split_header != 0; struct gve_rx_buf_state_dqo *buf_state; struct gve_priv *priv = rx->gve; u16 buf_len; + u16 hdr_len; if (unlikely(buffer_id >= rx->dqo.num_buf_states)) { net_err_ratelimited("%s: Invalid RX buffer_id=%u\n", @@ -538,18 +595,56 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, } if (unlikely(compl_desc->rx_error)) { - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, - buf_state); + net_err_ratelimited("%s: Descriptor error=%u\n", + priv->dev->name, compl_desc->rx_error); + gve_recycle_buf(rx, buf_state); return -EINVAL; } buf_len = compl_desc->packet_len; + hdr_len = compl_desc->header_len; + + if (unlikely(sph && !hdr_len)) { + gve_recycle_buf(rx, buf_state); + return -EINVAL; + } + + if (unlikely(hdr_len && buf_state->hdr_buf == NULL)) { + gve_recycle_buf(rx, buf_state); + return -EINVAL; + } + + if (unlikely(hbo && priv->header_split_strict)) { + gve_recycle_buf(rx, buf_state); + return -EFAULT; + } /* Page might have not been used for awhile and was likely last written * by a different thread. */ prefetch(buf_state->page_info.page); + /* Copy the header into the skb in the case of header split */ + if (sph) { + dma_sync_single_for_cpu(&priv->pdev->dev, + buf_state->hdr_buf->addr, + hdr_len, DMA_FROM_DEVICE); + + rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi, + buf_state->hdr_buf->data, + hdr_len); + if (unlikely(!rx->ctx.skb_head)) + goto error; + + rx->ctx.skb_tail = rx->ctx.skb_head; + + u64_stats_update_begin(&rx->statss); + rx->rx_hsplit_pkt++; + rx->rx_hsplit_hbo_pkt += hbo; + rx->rheader_bytes += hdr_len; + u64_stats_update_end(&rx->statss); + } + /* Sync the portion of dma buffer for CPU to read. 
*/ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr, buf_state->page_info.page_offset, @@ -558,10 +653,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, /* Append to current skb if one exists. */ if (rx->ctx.skb_head) { if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, - priv)) != 0) { + priv)) != 0) goto error; - } - gve_try_recycle_buf(priv, rx, buf_state); return 0; } @@ -578,8 +671,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, rx->rx_copybreak_pkt++; u64_stats_update_end(&rx->statss); - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, - buf_state); + gve_recycle_buf(rx, buf_state); return 0; } @@ -597,7 +689,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; error: - gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + dev_err(&priv->pdev->dev, "%s: Error return", priv->dev->name); + gve_recycle_buf(rx, buf_state); return -ENOMEM; } @@ -696,6 +789,8 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) rx->rx_skb_alloc_fail++; else if (err == -EINVAL) rx->rx_desc_err_dropped_pkt++; + else if (err == -EFAULT) + rx->rx_hsplit_err_dropped_pkt++; u64_stats_update_end(&rx->statss); } diff --git a/google/gve/gve_utils.c b/google/gve/gve_utils.c index 26e08d7..5e6275e 100644 --- a/google/gve/gve_utils.c +++ b/google/gve/gve_utils.c @@ -48,11 +48,9 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) rx->ntfy_id = ntfy_idx; } -struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, - struct gve_rx_slot_page_info *page_info, u16 len) +struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi, + u8 *data, u16 len) { - void *va = page_info->page_address + page_info->page_offset + - page_info->pad; struct sk_buff *skb; skb = napi_alloc_skb(napi, len); @@ -60,12 +58,20 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, return NULL; __skb_put(skb, len); - skb_copy_to_linear_data_offset(skb, 0, va, len); + skb_copy_to_linear_data_offset(skb, 0, data, len); skb->protocol = eth_type_trans(skb, dev); return skb; } +struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, + struct gve_rx_slot_page_info *page_info, u16 len) +{ + void *va = page_info->page_address + page_info->page_offset + + page_info->pad; + return gve_rx_copy_data(dev, napi, va, len); +} + void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info) { page_info->pagecnt_bias--; diff --git a/google/gve/gve_utils.h b/google/gve/gve_utils.h index 324fd98..6131aef 100644 --- a/google/gve/gve_utils.h +++ b/google/gve/gve_utils.h @@ -17,6 +17,9 @@ void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx); void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx); void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx); +struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi, + u8 *data, u16 len); + struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, u16 len); From d8653309980460d95641fb2501b880d199509bbb Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Thu, 25 May 2023 15:27:18 -0700 Subject: [PATCH 13/28] Add flow steering support - Add extended adminq command for commands that are larger than 56 bytes entry - Add flow steering device option to signal whether device supports flow steering - Add flow steering adminq command to add, delete and reset the rules - 
Add flow steering support to ethtool --- google/gve/gve.h | 36 +++ google/gve/gve_adminq.c | 183 +++++++++++- google/gve/gve_adminq.h | 80 ++++++ google/gve/gve_ethtool.c | 541 ++++++++++++++++++++++++++++++++++- google/gve/gve_main.c | 28 ++ patches/ethtool_ntuple.cocci | 33 +++ 6 files changed, 893 insertions(+), 8 deletions(-) create mode 100644 patches/ethtool_ntuple.cocci diff --git a/google/gve/gve.h b/google/gve/gve.h index 315eef2..abad376 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -555,6 +555,31 @@ enum gve_queue_format { GVE_DQO_RDA_FORMAT = 0x3, }; +struct gve_flow_spec { + __be32 src_ip[4]; + __be32 dst_ip[4]; + union { + struct { + __be16 src_port; + __be16 dst_port; + }; + __be32 spi; + }; + union { + u8 tos; + u8 tclass; + }; +}; + +struct gve_flow_rule { + struct list_head list; + u16 loc; + u16 flow_type; + u16 action; + struct gve_flow_spec key; + struct gve_flow_spec mask; +}; + struct gve_priv { struct net_device *dev; struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ @@ -616,6 +641,7 @@ struct gve_priv { u32 adminq_report_link_speed_cnt; u32 adminq_get_ptype_map_cnt; u32 adminq_verify_driver_compatibility_cnt; + u32 adminq_cfg_flow_rule_cnt; /* Global stats */ u32 interface_up_cnt; /* count of times interface turned up since last reset */ @@ -664,6 +690,14 @@ struct gve_priv { u16 header_buf_size; u8 header_split_strict; struct dma_pool *header_buf_pool; + + /* The maximum number of rules for flow-steering. + * A non-zero value enables flow-steering. + */ + u16 flow_rules_max; + u16 flow_rules_cnt; + struct list_head flow_rules; + spinlock_t flow_rules_lock; }; enum gve_service_task_flags_bit { @@ -1009,6 +1043,8 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown); int gve_adjust_queues(struct gve_priv *priv, struct gve_queue_config new_rx_config, struct gve_queue_config new_tx_config); +int gve_flow_rules_reset(struct gve_priv *priv); + /* report stats handling */ void gve_handle_report_stats(struct gve_priv *priv); /* exported by ethtool.c */ diff --git a/google/gve/gve_adminq.c b/google/gve/gve_adminq.c index 890fd86..8967f57 100644 --- a/google/gve/gve_adminq.c +++ b/google/gve/gve_adminq.c @@ -40,7 +40,8 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, - struct gve_device_option_buffer_sizes **dev_op_buffer_sizes) + struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_flow_steering **dev_op_flow_steering) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); u16 option_length = be16_to_cpu(option->option_length); @@ -151,6 +152,24 @@ void gve_parse_device_option(struct gve_priv *priv, if ((*dev_op_buffer_sizes)->header_buffer_size) priv->ethtool_defaults |= BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT); break; + case GVE_DEV_OPT_ID_FLOW_STEERING: + if (option_length < sizeof(**dev_op_flow_steering) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Flow Steering", + (int)sizeof(**dev_op_flow_steering), + GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_flow_steering)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "Flow Steering"); + } + *dev_op_flow_steering = (void *)(option + 1); + break; default: /* If we don't recognize the option just continue * 
without doing anything. @@ -168,7 +187,8 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, - struct gve_device_option_buffer_sizes **dev_op_buffer_sizes) + struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_flow_steering **dev_op_flow_steering) { const int num_options = be16_to_cpu(descriptor->num_device_options); struct gve_device_option *dev_opt; @@ -189,7 +209,7 @@ gve_process_device_options(struct gve_priv *priv, gve_parse_device_option(priv, descriptor, dev_opt, dev_op_gqi_rda, dev_op_gqi_qpl, dev_op_dqo_rda, dev_op_jumbo_frames, - dev_op_buffer_sizes); + dev_op_buffer_sizes, dev_op_flow_steering); dev_opt = next_opt; } @@ -220,6 +240,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) priv->adminq_report_stats_cnt = 0; priv->adminq_report_link_speed_cnt = 0; priv->adminq_get_ptype_map_cnt = 0; + priv->adminq_cfg_flow_rule_cnt = 0; /* Setup Admin queue with the device */ iowrite32be(priv->adminq_bus_addr / PAGE_SIZE, @@ -389,6 +410,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, memcpy(cmd, cmd_orig, sizeof(*cmd_orig)); opcode = be32_to_cpu(READ_ONCE(cmd->opcode)); + if (opcode == GVE_ADMINQ_EXTENDED_COMMAND) + opcode = be32_to_cpu(cmd->extended_command.inner_opcode); switch (opcode) { case GVE_ADMINQ_DESCRIBE_DEVICE: @@ -433,6 +456,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY: priv->adminq_verify_driver_compatibility_cnt++; break; + case GVE_ADMINQ_CONFIGURE_FLOW_RULE: + priv->adminq_cfg_flow_rule_cnt++; + break; default: dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); } @@ -464,6 +490,39 @@ static int gve_adminq_execute_cmd(struct gve_priv *priv, return gve_adminq_kick_and_wait(priv); } +static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, + uint32_t opcode, size_t cmd_size, + void *cmd_orig) +{ + union gve_adminq_command cmd; + dma_addr_t inner_cmd_bus; + void *inner_cmd; + int err; + + inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size, + &inner_cmd_bus, GFP_KERNEL); + if (!inner_cmd) + return -ENOMEM; + + memcpy(inner_cmd, cmd_orig, cmd_size); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND); + cmd.extended_command = (struct gve_adminq_extended_command) { + .inner_opcode = cpu_to_be32(opcode), + .inner_length = cpu_to_be32(cmd_size), + .inner_command_addr = cpu_to_be64(inner_cmd_bus), + }; + + err = gve_adminq_execute_cmd(priv, &cmd); + + dma_free_coherent(&priv->pdev->dev, + cmd_size, + inner_cmd, inner_cmd_bus); + return err; +} + + /* The device specifies that the management vector can either be the first irq * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to * the ntfy blks. 
It if is 0 then the management vector is last, if it is 1 then @@ -714,7 +773,8 @@ static void gve_enable_supported_features( struct gve_priv *priv, u32 supported_features_mask, const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames, - const struct gve_device_option_buffer_sizes *dev_op_buffer_sizes) + const struct gve_device_option_buffer_sizes *dev_op_buffer_sizes, + const struct gve_device_option_flow_steering *dev_op_flow_steering) { int buf_size; @@ -761,10 +821,19 @@ static void gve_enable_supported_features( } } + if (dev_op_flow_steering && + (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) { + dev_info(&priv->pdev->dev, + "FLOW STEERING device option enabled.\n"); + priv->flow_rules_max = + be16_to_cpu(dev_op_flow_steering->max_num_rules); + } + } int gve_adminq_describe_device(struct gve_priv *priv) { + struct gve_device_option_flow_steering *dev_op_flow_steering = NULL; struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; @@ -797,7 +866,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda, &dev_op_gqi_qpl, &dev_op_dqo_rda, &dev_op_jumbo_frames, - &dev_op_buffer_sizes); + &dev_op_buffer_sizes, + &dev_op_flow_steering); if (err) goto free_device_descriptor; @@ -831,8 +901,9 @@ int gve_adminq_describe_device(struct gve_priv *priv) if (gve_is_gqi(priv)) { err = gve_set_desc_cnt(priv, descriptor); } else { - /* DQO supports LRO. */ + /* DQO supports LRO and flow-steering */ priv->dev->hw_features |= NETIF_F_LRO; + priv->dev->hw_features |= NETIF_F_NTUPLE; err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda); } if (err) @@ -863,7 +934,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) gve_enable_supported_features(priv, supported_features_mask, dev_op_jumbo_frames, - dev_op_buffer_sizes); + dev_op_buffer_sizes, + dev_op_flow_steering); free_device_descriptor: dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor, @@ -1026,3 +1098,100 @@ int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, ptype_map_bus); return err; } + +static int gve_adminq_configure_flow_rule(struct gve_priv *priv, + struct gve_adminq_configure_flow_rule *flow_rule_cmd) +{ + return gve_adminq_execute_extended_cmd(priv, + GVE_ADMINQ_CONFIGURE_FLOW_RULE, + sizeof(struct gve_adminq_configure_flow_rule), + flow_rule_cmd); +} + +int gve_adminq_add_flow_rule(struct gve_priv *priv, + struct gve_flow_rule *rule) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .cmd = cpu_to_be16(GVE_RULE_ADD), + .loc = cpu_to_be16(rule->loc), + .rule = { + .flow_type = cpu_to_be16(rule->flow_type), + .action = cpu_to_be16(rule->action), + .key = { + .src_ip = { rule->key.src_ip[0], + rule->key.src_ip[1], + rule->key.src_ip[2], + rule->key.src_ip[3] }, + .dst_ip = { rule->key.dst_ip[0], + rule->key.dst_ip[1], + rule->key.dst_ip[2], + rule->key.dst_ip[3] }, + }, + .mask = { + .src_ip = { rule->mask.src_ip[0], + rule->mask.src_ip[1], + rule->mask.src_ip[2], + rule->mask.src_ip[3] }, + .dst_ip = { rule->mask.dst_ip[0], + rule->mask.dst_ip[1], + rule->mask.dst_ip[2], + rule->mask.dst_ip[3] }, + }, + }, + }; + switch (rule->flow_type) { + case GVE_FLOW_TYPE_TCPV4: + case GVE_FLOW_TYPE_UDPV4: + case GVE_FLOW_TYPE_SCTPV4: + flow_rule_cmd.rule.key.src_port = rule->key.src_port; + flow_rule_cmd.rule.key.dst_port = rule->key.dst_port; + flow_rule_cmd.rule.key.tos = rule->key.tos; + 
flow_rule_cmd.rule.mask.src_port = rule->mask.src_port; + flow_rule_cmd.rule.mask.dst_port = rule->mask.dst_port; + flow_rule_cmd.rule.mask.tos = rule->mask.tos; + break; + case GVE_FLOW_TYPE_AHV4: + case GVE_FLOW_TYPE_ESPV4: + flow_rule_cmd.rule.key.spi = rule->key.spi; + flow_rule_cmd.rule.key.tos = rule->key.tos; + flow_rule_cmd.rule.mask.spi = rule->mask.spi; + flow_rule_cmd.rule.mask.tos = rule->mask.tos; + break; + case GVE_FLOW_TYPE_TCPV6: + case GVE_FLOW_TYPE_UDPV6: + case GVE_FLOW_TYPE_SCTPV6: + flow_rule_cmd.rule.key.src_port = rule->key.src_port; + flow_rule_cmd.rule.key.dst_port = rule->key.dst_port; + flow_rule_cmd.rule.key.tclass = rule->key.tclass; + flow_rule_cmd.rule.mask.src_port = rule->mask.src_port; + flow_rule_cmd.rule.mask.dst_port = rule->mask.dst_port; + flow_rule_cmd.rule.mask.tclass = rule->mask.tclass; + break; + case GVE_FLOW_TYPE_AHV6: + case GVE_FLOW_TYPE_ESPV6: + flow_rule_cmd.rule.key.spi = rule->key.spi; + flow_rule_cmd.rule.key.tclass = rule->key.tclass; + flow_rule_cmd.rule.mask.spi = rule->mask.spi; + flow_rule_cmd.rule.mask.tclass = rule->mask.tclass; + break; + } + + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +int gve_adminq_del_flow_rule(struct gve_priv *priv, int loc) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .cmd = cpu_to_be16(GVE_RULE_DEL), + .loc = cpu_to_be16(loc), + }; + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +int gve_adminq_reset_flow_rules(struct gve_priv *priv) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .cmd = cpu_to_be16(GVE_RULE_RESET), + }; + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} diff --git a/google/gve/gve_adminq.h b/google/gve/gve_adminq.h index 89407d2..279ab6b 100644 --- a/google/gve/gve_adminq.h +++ b/google/gve/gve_adminq.h @@ -25,6 +25,11 @@ enum gve_adminq_opcodes { GVE_ADMINQ_REPORT_LINK_SPEED = 0xD, GVE_ADMINQ_GET_PTYPE_MAP = 0xE, GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF, + + /* For commands that are larger than 56 bytes */ + GVE_ADMINQ_EXTENDED_COMMAND = 0xFF, + + GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101, }; /* Admin queue status codes */ @@ -125,6 +130,14 @@ struct gve_device_option_buffer_sizes { static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8); +struct gve_device_option_flow_steering { + __be32 supported_features_mask; + __be16 max_num_rules; + u8 padding[2]; +}; + +static_assert(sizeof(struct gve_device_option_flow_steering) == 8); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -140,6 +153,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_DQO_RDA = 0x4, GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, + GVE_DEV_OPT_ID_FLOW_STEERING = 0xb, }; enum gve_dev_opt_req_feat_mask { @@ -149,11 +163,13 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0, }; enum gve_sup_feature_mask { GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, + GVE_SUP_FLOW_STEERING_MASK = 1 << 5, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -185,6 +201,13 @@ enum gve_driver_capbility { #define GVE_DRIVER_CAPABILITY_FLAGS3 0x0 #define GVE_DRIVER_CAPABILITY_FLAGS4 0x0 +struct gve_adminq_extended_command { + __be32 inner_opcode; + __be32 inner_length; + __be64 inner_command_addr; +}; +static_assert(sizeof(struct gve_adminq_extended_command) == 16); + struct gve_driver_info 
{ u8 os_type; /* 0x01 = Linux */ u8 driver_major; @@ -388,6 +411,58 @@ struct gve_adminq_get_ptype_map { __be64 ptype_map_addr; }; +/* Flow-steering related definitions */ +enum gve_adminq_flow_rule_cmd { + GVE_RULE_ADD = 0, + GVE_RULE_DEL = 1, + GVE_RULE_RESET = 2, +}; + +enum gve_adminq_flow_type { + GVE_FLOW_TYPE_TCPV4 = 0, + GVE_FLOW_TYPE_UDPV4 = 1, + GVE_FLOW_TYPE_SCTPV4 = 2, + GVE_FLOW_TYPE_AHV4 = 3, + GVE_FLOW_TYPE_ESPV4 = 4, + GVE_FLOW_TYPE_TCPV6 = 5, + GVE_FLOW_TYPE_UDPV6 = 6, + GVE_FLOW_TYPE_SCTPV6 = 7, + GVE_FLOW_TYPE_AHV6 = 8, + GVE_FLOW_TYPE_ESPV6 = 9, +}; + +struct gve_adminq_flow_spec { + __be32 src_ip[4]; + __be32 dst_ip[4]; + union { + struct { + __be16 src_port; + __be16 dst_port; + }; + __be32 spi; + }; + union { + u8 tos; + u8 tclass; + }; +}; +static_assert(sizeof(struct gve_adminq_flow_spec) == 40); + +/* Flow-steering command */ +struct gve_adminq_flow_rule { + __be16 flow_type; + __be16 action; /* Queue */ + struct gve_adminq_flow_spec key; + struct gve_adminq_flow_spec mask; /* ports can be 0 or 0xffff */ +}; + +struct gve_adminq_configure_flow_rule { + __be16 cmd; + __be16 loc; + struct gve_adminq_flow_rule rule; +}; +static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 88); + union gve_adminq_command { struct { __be32 opcode; @@ -408,6 +483,7 @@ union gve_adminq_command { struct gve_adminq_get_ptype_map get_ptype_map; struct gve_adminq_verify_driver_compatibility verify_driver_compatibility; + struct gve_adminq_extended_command extended_command; }; }; u8 reserved[64]; @@ -439,6 +515,10 @@ int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, u64 driver_info_len, dma_addr_t driver_info_addr); int gve_adminq_report_link_speed(struct gve_priv *priv); +int gve_adminq_add_flow_rule(struct gve_priv *priv, + struct gve_flow_rule *rule); +int gve_adminq_del_flow_rule(struct gve_priv *priv, int loc); +int gve_adminq_reset_flow_rules(struct gve_priv *priv); struct gve_ptype_lut; int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, diff --git a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index 38f2e1a..b3af8eb 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -75,7 +75,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt", "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt", "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", - "adminq_report_stats_cnt", "adminq_report_link_speed_cnt" + "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", + "adminq_cfg_flow_rule" }; static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { @@ -434,6 +435,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->adminq_set_driver_parameter_cnt; data[i++] = priv->adminq_report_stats_cnt; data[i++] = priv->adminq_report_link_speed_cnt; + data[i++] = priv->adminq_cfg_flow_rule_cnt; } static void gve_get_channels(struct net_device *netdev, @@ -699,6 +701,541 @@ static int gve_set_coalesce(struct net_device *netdev, return 0; } +static const char *gve_flow_type_name(enum gve_adminq_flow_type flow_type) +{ + switch (flow_type) { + case GVE_FLOW_TYPE_TCPV4: + case GVE_FLOW_TYPE_TCPV6: + return "TCP"; + case GVE_FLOW_TYPE_UDPV4: + case GVE_FLOW_TYPE_UDPV6: + return "UDP"; + case GVE_FLOW_TYPE_SCTPV4: + case GVE_FLOW_TYPE_SCTPV6: + return "SCTP"; + case GVE_FLOW_TYPE_AHV4: + case GVE_FLOW_TYPE_AHV6: + return "AH"; + case GVE_FLOW_TYPE_ESPV4: + case GVE_FLOW_TYPE_ESPV6: + return "ESP"; + } + return NULL; +} + +static 
void gve_print_flow_rule(struct gve_priv *priv, + struct gve_flow_rule *rule) +{ + const char *proto = gve_flow_type_name(rule->flow_type); + + if (!proto) + return; + + switch (rule->flow_type) { + case GVE_FLOW_TYPE_TCPV4: + case GVE_FLOW_TYPE_UDPV4: + case GVE_FLOW_TYPE_SCTPV4: + dev_info(&priv->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n", + rule->loc, + &rule->key.dst_ip[0], + &rule->key.src_ip[0], + proto, + ntohs(rule->key.dst_port), + ntohs(rule->key.src_port)); + break; + case GVE_FLOW_TYPE_AHV4: + case GVE_FLOW_TYPE_ESPV4: + dev_info(&priv->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: spi %hu\n", + rule->loc, + &rule->key.dst_ip[0], + &rule->key.src_ip[0], + proto, + ntohl(rule->key.spi)); + break; + case GVE_FLOW_TYPE_TCPV6: + case GVE_FLOW_TYPE_UDPV6: + case GVE_FLOW_TYPE_SCTPV6: + dev_info(&priv->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n", + rule->loc, + &rule->key.dst_ip, + &rule->key.src_ip, + proto, + ntohs(rule->key.dst_port), + ntohs(rule->key.src_port)); + break; + case GVE_FLOW_TYPE_AHV6: + case GVE_FLOW_TYPE_ESPV6: + dev_info(&priv->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: spi %hu\n", + rule->loc, + &rule->key.dst_ip, + &rule->key.src_ip, + proto, + ntohl(rule->key.spi)); + break; + default: + break; + } +} + +static bool gve_flow_rule_is_dup_rule(struct gve_priv *priv, struct gve_flow_rule *rule) +{ + struct gve_flow_rule *tmp; + + list_for_each_entry(tmp, &priv->flow_rules, list) { + if (tmp->flow_type != rule->flow_type) + continue; + + if (!memcmp(&tmp->key, &rule->key, + sizeof(struct gve_flow_spec)) && + !memcmp(&tmp->mask, &rule->mask, + sizeof(struct gve_flow_spec))) + return true; + } + return false; +} + +static struct gve_flow_rule *gve_find_flow_rule_by_loc(struct gve_priv *priv, u16 loc) +{ + struct gve_flow_rule *rule; + + list_for_each_entry(rule, &priv->flow_rules, list) + if (rule->loc == loc) + return rule; + + return NULL; +} + +static void gve_flow_rules_add_rule(struct gve_priv *priv, struct gve_flow_rule *rule) +{ + struct gve_flow_rule *tmp, *parent = NULL; + + list_for_each_entry(tmp, &priv->flow_rules, list) { + if (tmp->loc >= rule->loc) + break; + parent = tmp; + } + + if (parent) + list_add(&rule->list, &parent->list); + else + list_add(&rule->list, &priv->flow_rules); + + priv->flow_rules_cnt++; +} + +static void gve_flow_rules_del_rule(struct gve_priv *priv, struct gve_flow_rule *rule) +{ + list_del(&rule->list); + kvfree(rule); + priv->flow_rules_cnt--; +} + +static int +gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct gve_flow_rule *rule = NULL; + int err = 0; + + if (priv->flow_rules_max == 0) + return -EOPNOTSUPP; + + spin_lock_bh(&priv->flow_rules_lock); + rule = gve_find_flow_rule_by_loc(priv, fsp->location); + if (!rule) { + err = -EINVAL; + goto ret; + } + + switch (rule->flow_type) { + case GVE_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case GVE_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case GVE_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case GVE_FLOW_TYPE_AHV4: + fsp->flow_type = AH_V4_FLOW; + break; + case GVE_FLOW_TYPE_ESPV4: + fsp->flow_type = ESP_V4_FLOW; + break; + case GVE_FLOW_TYPE_TCPV6: + fsp->flow_type = TCP_V6_FLOW; + break; + case GVE_FLOW_TYPE_UDPV6: + fsp->flow_type = UDP_V6_FLOW; + break; + case GVE_FLOW_TYPE_SCTPV6: + fsp->flow_type = SCTP_V6_FLOW; + 
break; + case GVE_FLOW_TYPE_AHV6: + fsp->flow_type = AH_V6_FLOW; + break; + case GVE_FLOW_TYPE_ESPV6: + fsp->flow_type = ESP_V6_FLOW; + break; + default: + err = -EINVAL; + goto ret; + } + + memset(&fsp->h_u, 0, sizeof(fsp->h_u)); + memset(&fsp->h_ext, 0, sizeof(fsp->h_ext)); + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.ip4src = rule->key.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->key.dst_ip[0]; + fsp->h_u.tcp_ip4_spec.psrc = rule->key.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->key.dst_port; + fsp->h_u.tcp_ip4_spec.tos = rule->key.tos; + fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.psrc = rule->mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = rule->mask.dst_port; + fsp->m_u.tcp_ip4_spec.tos = rule->mask.tos; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fsp->h_u.ah_ip4_spec.ip4src = rule->key.src_ip[0]; + fsp->h_u.ah_ip4_spec.ip4dst = rule->key.dst_ip[0]; + fsp->h_u.ah_ip4_spec.spi = rule->key.spi; + fsp->h_u.ah_ip4_spec.tos = rule->key.tos; + fsp->m_u.ah_ip4_spec.ip4src = rule->mask.src_ip[0]; + fsp->m_u.ah_ip4_spec.ip4dst = rule->mask.dst_ip[0]; + fsp->m_u.ah_ip4_spec.spi = rule->mask.spi; + fsp->m_u.ah_ip4_spec.tos = rule->mask.tos; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, &rule->key.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, &rule->key.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = rule->key.src_port; + fsp->h_u.tcp_ip6_spec.pdst = rule->key.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = rule->key.tclass; + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, &rule->mask.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, &rule->mask.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = rule->mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = rule->mask.dst_port; + fsp->m_u.tcp_ip6_spec.tclass = rule->mask.tclass; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->key.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->key.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.ah_ip6_spec.spi = rule->key.spi; + fsp->h_u.ah_ip6_spec.tclass = rule->key.tclass; + memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->mask.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->mask.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.ah_ip6_spec.spi = rule->mask.spi; + fsp->m_u.ah_ip6_spec.tclass = rule->mask.tclass; + break; + default: + err = -EINVAL; + goto ret; + } + + fsp->ring_cookie = rule->action; + +ret: + spin_unlock_bh(&priv->flow_rules_lock); + return err; +} + +static int +gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct gve_flow_rule *rule; + unsigned int cnt = 0; + int err = 0; + + if (priv->flow_rules_max == 0) + return -EOPNOTSUPP; + + cmd->data = priv->flow_rules_max; + + spin_lock_bh(&priv->flow_rules_lock); + list_for_each_entry(rule, &priv->flow_rules, list) { + if (cnt == cmd->rule_cnt) { + err = -EMSGSIZE; + goto ret; + } + rule_locs[cnt] = rule->loc; + cnt++; + } + cmd->rule_cnt = cnt; + +ret: + spin_unlock_bh(&priv->flow_rules_lock); + return err; +} + +static int +gve_add_flow_rule_info(struct gve_priv *priv, struct ethtool_rx_flow_spec *fsp, + struct 
gve_flow_rule *rule) +{ + u32 flow_type, q_index = 0; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + return -EOPNOTSUPP; + + q_index = fsp->ring_cookie; + if (q_index >= priv->rx_cfg.num_queues) + return -EINVAL; + + rule->action = q_index; + rule->loc = fsp->location; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + switch (flow_type) { + case TCP_V4_FLOW: + rule->flow_type = GVE_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + rule->flow_type = GVE_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + rule->flow_type = GVE_FLOW_TYPE_SCTPV4; + break; + case AH_V4_FLOW: + rule->flow_type = GVE_FLOW_TYPE_AHV4; + break; + case ESP_V4_FLOW: + rule->flow_type = GVE_FLOW_TYPE_ESPV4; + break; + case TCP_V6_FLOW: + rule->flow_type = GVE_FLOW_TYPE_TCPV6; + break; + case UDP_V6_FLOW: + rule->flow_type = GVE_FLOW_TYPE_UDPV6; + break; + case SCTP_V6_FLOW: + rule->flow_type = GVE_FLOW_TYPE_SCTPV6; + break; + case AH_V6_FLOW: + rule->flow_type = GVE_FLOW_TYPE_AHV6; + break; + case ESP_V6_FLOW: + rule->flow_type = GVE_FLOW_TYPE_ESPV6; + break; + default: + return -EINVAL; + } + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + rule->key.src_port = fsp->h_u.tcp_ip4_spec.psrc; + rule->key.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + rule->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + rule->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + rule->key.spi = fsp->h_u.ah_ip4_spec.spi; + rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + rule->mask.spi = fsp->m_u.ah_ip4_spec.spi; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(&rule->key.src_ip, fsp->h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->key.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.src_port = fsp->h_u.tcp_ip6_spec.psrc; + rule->key.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + memcpy(&rule->mask.src_ip, fsp->m_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->mask.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + rule->mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(&rule->key.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->key.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.spi = fsp->h_u.ah_ip6_spec.spi; + memcpy(&rule->mask.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->mask.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.spi = fsp->h_u.ah_ip6_spec.spi; + break; + default: + /* not doing un-parsed flow types */ + return -EINVAL; + } + + if (gve_flow_rule_is_dup_rule(priv, rule)) + return -EEXIST; + + return 0; +} + +static int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct gve_flow_rule *rule = NULL; + int err; + + if (priv->flow_rules_max == 0) + return -EOPNOTSUPP; + + if (priv->flow_rules_cnt >= priv->flow_rules_max) { + dev_err(&priv->pdev->dev, + 
"Reached the limit of max allowed flow rules (%u)\n", + priv->flow_rules_max); + return -ENOSPC; + } + + spin_lock_bh(&priv->flow_rules_lock); + if (gve_find_flow_rule_by_loc(priv, fsp->location)) { + dev_err(&priv->pdev->dev, "Flow rule %d already exists\n", + fsp->location); + err = -EEXIST; + goto ret; + } + + rule = kvzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) { + err = -ENOMEM; + goto ret; + } + + err = gve_add_flow_rule_info(priv, fsp, rule); + if (err) + goto ret; + + err = gve_adminq_add_flow_rule(priv, rule); + if (err) + goto ret; + + gve_flow_rules_add_rule(priv, rule); + gve_print_flow_rule(priv, rule); + +ret: + spin_unlock_bh(&priv->flow_rules_lock); + if (err && rule) + kfree(rule); + return err; +} + +static int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct gve_flow_rule *rule = NULL; + int err = 0; + + if (priv->flow_rules_max == 0) + return -EOPNOTSUPP; + + spin_lock_bh(&priv->flow_rules_lock); + rule = gve_find_flow_rule_by_loc(priv, fsp->location); + if (!rule) { + err = -EINVAL; + goto ret; + } + + err = gve_adminq_del_flow_rule(priv, fsp->location); + if (err) + goto ret; + + gve_flow_rules_del_rule(priv, rule); + +ret: + spin_unlock_bh(&priv->flow_rules_lock); + return err; +} + +static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct gve_priv *priv = netdev_priv(netdev); + int err = -EOPNOTSUPP; + + if (!(netdev->features & NETIF_F_NTUPLE)) + return err; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = gve_add_flow_rule(priv, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = gve_del_flow_rule(priv, cmd); + break; + case ETHTOOL_SRXFH: + /* not supported */ + break; + default: + break; + } + + return err; +} + +static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct gve_priv *priv = netdev_priv(netdev); + int err = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->rx_cfg.num_queues; + err = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + if (priv->flow_rules_max == 0) + break; + cmd->rule_cnt = priv->flow_rules_cnt; + cmd->data = priv->flow_rules_max; + err = 0; + break; + case ETHTOOL_GRXCLSRULE: + err = gve_get_flow_rule_entry(priv, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + /* not supported */ + break; + default: + break; + } + + return err; +} + const struct ethtool_ops gve_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = gve_get_drvinfo, @@ -709,6 +1246,8 @@ const struct ethtool_ops gve_ethtool_ops = { .get_msglevel = gve_get_msglevel, .set_channels = gve_set_channels, .get_channels = gve_get_channels, + .set_rxnfc = gve_set_rxnfc, + .get_rxnfc = gve_get_rxnfc, .get_link = ethtool_op_get_link, .get_coalesce = gve_get_coalesce, .set_coalesce = gve_set_coalesce, diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index dfb4b8d..d44c4d7 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -1681,6 +1681,26 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +int gve_flow_rules_reset(struct gve_priv *priv) +{ + struct gve_flow_rule *cur, *next; + int err; + + if (priv->flow_rules_cnt == 0) + return 0; + + err = gve_adminq_reset_flow_rules(priv); + if (err) + return err; + + list_for_each_entry_safe(cur, next, &priv->flow_rules, list) { + list_del(&cur->list); + 
kvfree(cur); + priv->flow_rules_cnt--; + } + return 0; +} + static int gve_adjust_queue_count(struct gve_priv *priv, struct gve_queue_config new_rx_config, struct gve_queue_config new_tx_config) @@ -1695,6 +1715,8 @@ static int gve_adjust_queue_count(struct gve_priv *priv, else priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; + + err = gve_flow_rules_reset(priv); return err; } @@ -1874,6 +1896,9 @@ static int gve_set_features(struct net_device *netdev, } } + if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) + gve_flow_rules_reset(priv); + return 0; err: /* Reverts the change on error. */ @@ -2077,6 +2102,9 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; priv->mgmt_msix_idx = priv->num_ntfy_blks; + spin_lock_init(&priv->flow_rules_lock); + INIT_LIST_HEAD(&priv->flow_rules); + priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); priv->rx_cfg.max_queues = diff --git a/patches/ethtool_ntuple.cocci b/patches/ethtool_ntuple.cocci new file mode 100644 index 0000000..6fe5272 --- /dev/null +++ b/patches/ethtool_ntuple.cocci @@ -0,0 +1,33 @@ +@@ +@@ +#include ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) ++#define FLOW_RSS 0 ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0)) */ +@@ +expression flow_type; +@@ + switch (flow_type) { + case TCP_V6_FLOW: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + ... ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) */ + case UDP_V6_FLOW: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + ... ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) */ + case SCTP_V6_FLOW: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + ... ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) */ + case AH_V6_FLOW: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + ... ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) */ + case ESP_V6_FLOW: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + ... ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) */ + default: + ... 
+ } From b46e6a6bb5418bdc17e0b8ead56b70ebe10ed8b7 Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Thu, 25 May 2023 15:28:21 -0700 Subject: [PATCH 14/28] Add RSS configuration support - Add adminq command to configure RSS - Add RSS configuration support to ethtool --- google/gve/gve.h | 25 ++++++++++++ google/gve/gve_adminq.c | 69 +++++++++++++++++++++++++++++++ google/gve/gve_adminq.h | 27 +++++++++++++ google/gve/gve_ethtool.c | 87 +++++++++++++++++++++++++++++++++++++++- google/gve/gve_main.c | 55 +++++++++++++++++++++++++ 5 files changed, 262 insertions(+), 1 deletion(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index abad376..28e7fb0 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -49,6 +49,9 @@ #define GVE_MIN_RX_BUFFER_SIZE 2048 #define GVE_MAX_RX_BUFFER_SIZE 4096 +#define GVE_RSS_KEY_SIZE 40 +#define GVE_RSS_INDIR_SIZE 128 + #define GVE_HEADER_BUFFER_SIZE_MIN 64 #define GVE_HEADER_BUFFER_SIZE_MAX 256 #define GVE_HEADER_BUFFER_SIZE_DEFAULT 128 @@ -544,6 +547,19 @@ struct gve_ptype_lut { struct gve_ptype ptypes[GVE_NUM_PTYPES]; }; +enum gve_rss_hash_alg { + GVE_RSS_HASH_UNDEFINED = 0, + GVE_RSS_HASH_TOEPLITZ = 1, +}; + +struct gve_rss_config { + enum gve_rss_hash_alg alg; + u16 key_size; + u16 indir_size; + u8 *key; + u32 *indir; +}; + /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value * when the entire configure_device_resources command is zeroed out and the * queue_format is not specified. @@ -642,6 +658,7 @@ struct gve_priv { u32 adminq_get_ptype_map_cnt; u32 adminq_verify_driver_compatibility_cnt; u32 adminq_cfg_flow_rule_cnt; + u32 adminq_cfg_rss_cnt; /* Global stats */ u32 interface_up_cnt; /* count of times interface turned up since last reset */ @@ -698,6 +715,9 @@ struct gve_priv { u16 flow_rules_cnt; struct list_head flow_rules; spinlock_t flow_rules_lock; + + /* RSS configuration */ + struct gve_rss_config rss_config; }; enum gve_service_task_flags_bit { @@ -1047,6 +1067,11 @@ int gve_flow_rules_reset(struct gve_priv *priv); /* report stats handling */ void gve_handle_report_stats(struct gve_priv *priv); + +/* RSS support */ +int gve_rss_config_init(struct gve_priv *priv); +void gve_rss_set_default_indir(struct gve_priv *priv); + /* exported by ethtool.c */ extern const struct ethtool_ops gve_ethtool_ops; /* needed by ethtool */ diff --git a/google/gve/gve_adminq.c b/google/gve/gve_adminq.c index 8967f57..8148518 100644 --- a/google/gve/gve_adminq.c +++ b/google/gve/gve_adminq.c @@ -441,6 +441,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES: priv->adminq_dcfg_device_resources_cnt++; break; + case GVE_ADMINQ_CONFIGURE_RSS: + priv->adminq_cfg_rss_cnt++; + break; case GVE_ADMINQ_SET_DRIVER_PARAMETER: priv->adminq_set_driver_parameter_cnt++; break; @@ -1195,3 +1198,69 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv) }; return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); } + +int gve_adminq_configure_rss(struct gve_priv *priv, + struct gve_rss_config *rss_config) +{ + dma_addr_t indir_bus = 0, key_bus = 0; + union gve_adminq_command cmd; + __be32 *indir = NULL; + u8 *key = NULL; + int err = 0; + int i; + + if (rss_config->indir_size) { + indir = dma_alloc_coherent(&priv->pdev->dev, + rss_config->indir_size * + sizeof(*rss_config->indir), + &indir_bus, GFP_KERNEL); + if (!indir) { + err = -ENOMEM; + goto out; + } + for (i = 0; i < rss_config->indir_size; i++) + indir[i] = cpu_to_be32(rss_config->indir[i]); + } + + if (rss_config->key_size) { + key = 
dma_alloc_coherent(&priv->pdev->dev, + rss_config->key_size * + sizeof(*rss_config->key), + &key_bus, GFP_KERNEL); + if (!key) { + err = -ENOMEM; + goto out; + } + memcpy(key, rss_config->key, rss_config->key_size); + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS); + cmd.configure_rss = (struct gve_adminq_configure_rss) { + .hash_types = cpu_to_be16(GVE_RSS_HASH_TCPV4 | + GVE_RSS_HASH_UDPV4 | + GVE_RSS_HASH_TCPV6 | + GVE_RSS_HASH_UDPV6), + .halg = rss_config->alg, + .hkey_len = cpu_to_be16(rss_config->key_size), + .indir_len = cpu_to_be16(rss_config->indir_size), + .hkey_addr = cpu_to_be64(key_bus), + .indir_addr = cpu_to_be64(indir_bus), + }; + + err = gve_adminq_execute_cmd(priv, &cmd); + +out: + if (indir) + dma_free_coherent(&priv->pdev->dev, + rss_config->indir_size * + sizeof(*rss_config->indir), + indir, indir_bus); + if (key) + dma_free_coherent(&priv->pdev->dev, + rss_config->key_size * + sizeof(*rss_config->key), + key, key_bus); + return err; +} + diff --git a/google/gve/gve_adminq.h b/google/gve/gve_adminq.h index 279ab6b..44e2483 100644 --- a/google/gve/gve_adminq.h +++ b/google/gve/gve_adminq.h @@ -20,6 +20,7 @@ enum gve_adminq_opcodes { GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7, GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8, GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9, + GVE_ADMINQ_CONFIGURE_RSS = 0xA, GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB, GVE_ADMINQ_REPORT_STATS = 0xC, GVE_ADMINQ_REPORT_LINK_SPEED = 0xD, @@ -463,6 +464,29 @@ struct gve_adminq_configure_flow_rule { }; static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 88); +#define GVE_RSS_HASH_IPV4 BIT(0) +#define GVE_RSS_HASH_TCPV4 BIT(1) +#define GVE_RSS_HASH_IPV6 BIT(2) +#define GVE_RSS_HASH_IPV6_EX BIT(3) +#define GVE_RSS_HASH_TCPV6 BIT(4) +#define GVE_RSS_HASH_TCPV6_EX BIT(5) +#define GVE_RSS_HASH_UDPV4 BIT(6) +#define GVE_RSS_HASH_UDPV6 BIT(7) +#define GVE_RSS_HASH_UDPV6_EX BIT(8) + +/* RSS configuration command */ +struct gve_adminq_configure_rss { + __be16 hash_types; + u8 halg; /* hash algorithm */ + u8 reserved; + __be16 hkey_len; + __be16 indir_len; + __be64 hkey_addr; + __be64 indir_addr; +}; + +static_assert(sizeof(struct gve_adminq_configure_rss) == 24); + union gve_adminq_command { struct { __be32 opcode; @@ -477,6 +501,7 @@ union gve_adminq_command { struct gve_adminq_describe_device describe_device; struct gve_adminq_register_page_list reg_page_list; struct gve_adminq_unregister_page_list unreg_page_list; + struct gve_adminq_configure_rss configure_rss; struct gve_adminq_set_driver_parameter set_driver_param; struct gve_adminq_report_stats report_stats; struct gve_adminq_report_link_speed report_link_speed; @@ -514,6 +539,8 @@ int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len, int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, u64 driver_info_len, dma_addr_t driver_info_addr); +int gve_adminq_configure_rss(struct gve_priv *priv, + struct gve_rss_config *config); int gve_adminq_report_link_speed(struct gve_priv *priv); int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_flow_rule *rule); diff --git a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index b3af8eb..5db6e81 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -76,7 +76,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt", "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", - 
"adminq_cfg_flow_rule" + "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt" }; static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { @@ -436,6 +436,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->adminq_report_stats_cnt; data[i++] = priv->adminq_report_link_speed_cnt; data[i++] = priv->adminq_cfg_flow_rule_cnt; + data[i++] = priv->adminq_cfg_rss_cnt; } static void gve_get_channels(struct net_device *netdev, @@ -701,6 +702,86 @@ static int gve_set_coalesce(struct net_device *netdev, return 0; } +static u32 gve_get_rxfh_key_size(struct net_device *netdev) +{ + return GVE_RSS_KEY_SIZE; +} + +static u32 gve_get_rxfh_indir_size(struct net_device *netdev) +{ + return GVE_RSS_INDIR_SIZE; +} + +static int gve_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct gve_priv *priv = netdev_priv(netdev); + struct gve_rss_config *rss_config = &priv->rss_config; + u16 i; + + if (hfunc) { + switch (rss_config->alg) { + case GVE_RSS_HASH_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case GVE_RSS_HASH_UNDEFINED: + default: + return -EOPNOTSUPP; + } + } + if (key) + memcpy(key, rss_config->key, rss_config->key_size); + + if (indir) + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < rss_config->indir_size; i++) + indir[i] = (u32)rss_config->indir[i]; + + return 0; +} + +static int gve_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct gve_priv *priv = netdev_priv(netdev); + struct gve_rss_config *rss_config = &priv->rss_config; + bool init = false; + u16 i; + int err = 0; + + /* Initialize RSS if not configured before */ + if (rss_config->alg == GVE_RSS_HASH_UNDEFINED) { + err = gve_rss_config_init(priv); + if (err) + return err; + init = true; + } + + switch (hfunc) { + case ETH_RSS_HASH_NO_CHANGE: + break; + case ETH_RSS_HASH_TOP: + rss_config->alg = GVE_RSS_HASH_TOEPLITZ; + break; + default: + return -EOPNOTSUPP; + } + + if (!key && !indir && !init) + return 0; + + if (key) + memcpy(rss_config->key, key, rss_config->key_size); + + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < rss_config->indir_size; i++) + rss_config->indir[i] = indir[i]; + } + + return gve_adminq_configure_rss(priv, rss_config); +} + static const char *gve_flow_type_name(enum gve_adminq_flow_type flow_type) { switch (flow_type) { @@ -1248,6 +1329,10 @@ const struct ethtool_ops gve_ethtool_ops = { .get_channels = gve_get_channels, .set_rxnfc = gve_set_rxnfc, .get_rxnfc = gve_get_rxnfc, + .get_rxfh_indir_size = gve_get_rxfh_indir_size, + .get_rxfh_key_size = gve_get_rxfh_key_size, + .get_rxfh = gve_get_rxfh, + .set_rxfh = gve_set_rxfh, .get_link = ethtool_op_get_link, .get_coalesce = gve_get_coalesce, .set_coalesce = gve_set_coalesce, diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index d44c4d7..ed419d9 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -1717,6 +1717,12 @@ static int gve_adjust_queue_count(struct gve_priv *priv, err = gve_flow_rules_reset(priv); + if (err) + return err; + + if (priv->rss_config.alg != GVE_RSS_HASH_UNDEFINED) + err = gve_rss_config_init(priv); + return err; } @@ -2434,6 +2440,55 @@ static int gve_resume(struct pci_dev *pdev) } #endif /* CONFIG_PM */ +void gve_rss_set_default_indir(struct gve_priv *priv) +{ + struct gve_rss_config *rss_config = &priv->rss_config; + int i; + + for (i = 0; i < GVE_RSS_INDIR_SIZE; i++) + rss_config->indir[i] = i % priv->rx_cfg.num_queues; +} + +int 
gve_rss_config_init(struct gve_priv *priv) +{ + struct gve_rss_config *rss_config = &priv->rss_config; + + if (rss_config->key) + kvfree(rss_config->key); + + if (rss_config->indir) + kvfree(rss_config->indir); + + memset(rss_config, 0, sizeof(*rss_config)); + + rss_config->key = kvzalloc(GVE_RSS_KEY_SIZE, GFP_KERNEL); + if (!rss_config->key) + goto err; + + netdev_rss_key_fill(rss_config->key, GVE_RSS_KEY_SIZE); + + rss_config->indir = kvcalloc(GVE_RSS_INDIR_SIZE, + sizeof(*rss_config->indir), + GFP_KERNEL); + if (!rss_config->indir) + goto err; + + + rss_config->alg = GVE_RSS_HASH_TOEPLITZ; + rss_config->key_size = GVE_RSS_KEY_SIZE; + rss_config->indir_size = GVE_RSS_INDIR_SIZE; + gve_rss_set_default_indir(priv); + + return gve_adminq_configure_rss(priv, rss_config); + +err: + if (rss_config->key) { + kvfree(rss_config->key); + rss_config->key = NULL; + } + return -ENOMEM; +} + static const struct pci_device_id gve_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) }, { } From b9554b143848e78626742193139f53fb00b734f1 Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Mon, 5 Jun 2023 14:04:39 -0700 Subject: [PATCH 15/28] gve: add flow steering and rss reset when teardown device resources --- google/gve/gve.h | 1 + google/gve/gve_main.c | 37 ++++++++++++++++++++----------------- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index 28e7fb0..96ed351 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -1071,6 +1071,7 @@ void gve_handle_report_stats(struct gve_priv *priv); /* RSS support */ int gve_rss_config_init(struct gve_priv *priv); void gve_rss_set_default_indir(struct gve_priv *priv); +void gve_rss_config_release(struct gve_rss_config *rss_config); /* exported by ethtool.c */ extern const struct ethtool_ops gve_ethtool_ops; diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index ed419d9..c1f440c 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -550,6 +550,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv) /* Tell device its resources are being freed */ if (gve_get_device_resources_ok(priv)) { + gve_flow_rules_reset(priv); /* detach the stats report */ err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); if (err) { @@ -569,6 +570,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv) kvfree(priv->ptype_lut_dqo); priv->ptype_lut_dqo = NULL; + gve_rss_config_release(&priv->rss_config); gve_free_counter_array(priv); gve_free_notify_blocks(priv); gve_free_stats_report(priv); @@ -1705,6 +1707,7 @@ static int gve_adjust_queue_count(struct gve_priv *priv, struct gve_queue_config new_rx_config, struct gve_queue_config new_tx_config) { + struct gve_queue_config old_rx_config = priv->rx_cfg; int err = 0; priv->rx_cfg = new_rx_config; @@ -1716,12 +1719,14 @@ static int gve_adjust_queue_count(struct gve_priv *priv, priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; - err = gve_flow_rules_reset(priv); - if (err) - return err; + if (old_rx_config.num_queues != new_rx_config.num_queues) { + err = gve_flow_rules_reset(priv); + if (err) + return err; - if (priv->rss_config.alg != GVE_RSS_HASH_UNDEFINED) - err = gve_rss_config_init(priv); + if (priv->rss_config.alg != GVE_RSS_HASH_UNDEFINED) + err = gve_rss_config_init(priv); + } return err; } @@ -2449,17 +2454,18 @@ void gve_rss_set_default_indir(struct gve_priv *priv) rss_config->indir[i] = i % priv->rx_cfg.num_queues; } +void gve_rss_config_release(struct gve_rss_config *rss_config) +{ + 
kvfree(rss_config->key); + kvfree(rss_config->indir); + memset(rss_config, 0, sizeof(*rss_config)); +} + int gve_rss_config_init(struct gve_priv *priv) { struct gve_rss_config *rss_config = &priv->rss_config; - if (rss_config->key) - kvfree(rss_config->key); - - if (rss_config->indir) - kvfree(rss_config->indir); - - memset(rss_config, 0, sizeof(*rss_config)); + gve_rss_config_release(rss_config); rss_config->key = kvzalloc(GVE_RSS_KEY_SIZE, GFP_KERNEL); if (!rss_config->key) @@ -2473,7 +2479,6 @@ int gve_rss_config_init(struct gve_priv *priv) if (!rss_config->indir) goto err; - rss_config->alg = GVE_RSS_HASH_TOEPLITZ; rss_config->key_size = GVE_RSS_KEY_SIZE; rss_config->indir_size = GVE_RSS_INDIR_SIZE; @@ -2482,10 +2487,8 @@ int gve_rss_config_init(struct gve_priv *priv) return gve_adminq_configure_rss(priv, rss_config); err: - if (rss_config->key) { - kvfree(rss_config->key); - rss_config->key = NULL; - } + kvfree(rss_config->key); + rss_config->key = NULL; return -ENOMEM; } From 2623888352c586b4207bd89f10db2a2b2063e17f Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Sun, 16 Jul 2023 23:11:33 -0700 Subject: [PATCH 16/28] gve: Add tx watchdog to avoid race condition on miss path - When it receives the tx miss completion, the driver will set 1 sec timeout to wait the later reinjection completion and keep the skb until then. This adds a race condition when the application is blocked because of the unreleased skb, and then the application won't send out new packets to trigger the tx napi poll interrupt. While the driver needs the interrupt to get into the napi poll to handle the tx path. Then both the driver and the application will be blocked. - This change adds a tx watchdog which will check whether there is packets in the miss completion need to deal with every 1 seconds. The tx watchdog will trigger the tx napi poll interrupt so that the race condition will be avoided. --- google/gve/gve.h | 11 +++ google/gve/gve_dqo.h | 1 + google/gve/gve_main.c | 83 ++++++++++++++++++- google/gve/gve_tx_dqo.c | 9 ++ .../timer_setup_tx_timeout_miss_path.cocci | 39 +++++++++ 5 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 patches/timer_setup_tx_timeout_miss_path.cocci diff --git a/google/gve/gve.h b/google/gve/gve.h index 96ed351..c6d4c10 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -42,6 +42,9 @@ #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) +// TX timeout period to check the miss path +#define GVE_TX_TIMEOUT_PERIOD 1 * HZ + /* PTYPEs are always 10 bits. */ #define GVE_NUM_PTYPES 1024 @@ -429,6 +432,10 @@ struct gve_tx_ring { /* Tracks the current gen bit of compl_q */ u8 cur_gen_bit; + /* the jiffies when last TX completion was processed*/ + unsigned long last_processed; + bool kicked; + /* Linked list of gve_tx_pending_packet_dqo. Index into * pending_packets, or -1 if empty. * @@ -684,6 +691,10 @@ struct gve_priv { unsigned long stats_report_timer_period; struct timer_list stats_report_timer; + unsigned long tx_timeout_period; + /* tx timeout timer for the miss path */ + struct timer_list tx_timeout_timer; + /* Gvnic device link speed from hypervisor. 
*/ u64 link_speed; bool up_before_suspend; /* True if dev was up before suspend */ diff --git a/google/gve/gve_dqo.h b/google/gve/gve_dqo.h index 1eb4d5f..9579162 100644 --- a/google/gve/gve_dqo.h +++ b/google/gve/gve_dqo.h @@ -35,6 +35,7 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev); bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean); int gve_rx_poll_dqo(struct gve_notify_block *block, int budget); +bool gve_tx_work_pending_dqo(struct gve_tx_ring *tx); int gve_tx_alloc_rings_dqo(struct gve_priv *priv); void gve_tx_free_rings_dqo(struct gve_priv *priv); int gve_rx_alloc_rings_dqo(struct gve_priv *priv); diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index c1f440c..05c4ab7 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -212,6 +212,76 @@ static void gve_free_stats_report(struct gve_priv *priv) priv->stats_report = NULL; } +static void gve_tx_timeout_for_miss_path(struct net_device *dev, unsigned int txqueue) +{ + struct gve_notify_block *block; + struct gve_tx_ring *tx = NULL; + bool has_work = false; + struct gve_priv *priv; + u32 ntfy_idx; + + priv = netdev_priv(dev); + + ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); + if (ntfy_idx > priv->num_ntfy_blks) + return; + + block = &priv->ntfy_blocks[ntfy_idx]; + tx = block->tx; + if (!tx) + return; + + /* Check to see if there is pending work */ + has_work = gve_tx_work_pending_dqo(tx); + if (!has_work) + return; + + if (READ_ONCE(tx->dqo_compl.kicked)) { + netdev_warn(dev, + "TX timeout on queue %d. Scheduling reset.", + txqueue); + gve_schedule_reset(priv); + } + + gve_write_irq_doorbell_dqo(priv, block, GVE_ITR_NO_UPDATE_DQO); + + netdev_info(dev, "Kicking tx queue %d for miss path", txqueue); + napi_schedule(&block->napi); + WRITE_ONCE(tx->dqo_compl.kicked, true); +} + +static void gve_tx_timeout_timer(struct timer_list *t) +{ + struct gve_priv *priv = from_timer(priv, t, tx_timeout_timer); + int i; + + for(i = 0; i < priv->tx_cfg.num_queues; i++) { + if (time_after(jiffies, READ_ONCE(priv->tx[i].dqo_compl.last_processed) + + priv->tx_timeout_period)) { + gve_tx_timeout_for_miss_path(priv->dev, i); + } + } + mod_timer(&priv->tx_timeout_timer, + jiffies + priv->tx_timeout_period); +} + +static int gve_setup_tx_timeout_timer(struct gve_priv *priv) +{ + /* Set up 1 sec timer to check no reinjection on miss path */ + if (gve_is_gqi(priv)) + return 0; + priv->tx_timeout_period = GVE_TX_TIMEOUT_PERIOD; + timer_setup(&priv->tx_timeout_timer, gve_tx_timeout_timer, 0); + return 0; +} + +static void gve_free_tx_timeout_timer(struct gve_priv *priv) +{ + if (gve_is_gqi(priv)) + return; + del_timer_sync(&priv->tx_timeout_timer); +} + static irqreturn_t gve_mgmnt_intr(int irq, void *arg) { struct gve_priv *priv = arg; @@ -490,9 +560,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) err = gve_alloc_notify_blocks(priv); if (err) goto abort_with_counter; + err = gve_setup_tx_timeout_timer(priv); + if(err) + goto abort_with_ntfy_blocks; err = gve_alloc_stats_report(priv); if (err) - goto abort_with_ntfy_blocks; + goto abort_with_tx_timeout; err = gve_adminq_configure_device_resources(priv, priv->counter_array_bus, priv->num_event_counters, @@ -534,6 +607,8 @@ static int gve_setup_device_resources(struct gve_priv *priv) priv->ptype_lut_dqo = NULL; abort_with_stats_report: gve_free_stats_report(priv); +abort_with_tx_timeout: + gve_free_tx_timeout_timer(priv); abort_with_ntfy_blocks: gve_free_notify_blocks(priv); abort_with_counter: @@ -574,6 +649,7 @@ static void 
gve_teardown_device_resources(struct gve_priv *priv) gve_free_counter_array(priv); gve_free_notify_blocks(priv); gve_free_stats_report(priv); + gve_free_tx_timeout_timer(priv); gve_clear_device_resources_ok(priv); } @@ -1322,6 +1398,10 @@ static int gve_open(struct net_device *dev) round_jiffies(jiffies + msecs_to_jiffies(priv->stats_report_timer_period))); + if (!gve_is_gqi(priv)) + mod_timer(&priv->tx_timeout_timer, + jiffies + priv->tx_timeout_period); + gve_turnup(priv); queue_work(priv->gve_wq, &priv->service_task); priv->interface_up_cnt++; @@ -1365,6 +1445,7 @@ static int gve_close(struct net_device *dev) gve_clear_device_rings_ok(priv); } del_timer_sync(&priv->stats_report_timer); + del_timer_sync(&priv->tx_timeout_timer); gve_unreg_xdp_info(priv); gve_free_rings(priv); diff --git a/google/gve/gve_tx_dqo.c b/google/gve/gve_tx_dqo.c index 59ab547..cd3779b 100644 --- a/google/gve/gve_tx_dqo.c +++ b/google/gve/gve_tx_dqo.c @@ -995,6 +995,9 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, remove_miss_completions(priv, tx); remove_timed_out_completions(priv, tx); + WRITE_ONCE(tx->dqo_compl.last_processed, jiffies); + WRITE_ONCE(tx->dqo_compl.kicked, false); + u64_stats_update_begin(&tx->statss); tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes; tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts; @@ -1026,3 +1029,9 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean) compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head]; return compl_desc->generation != tx->dqo_compl.cur_gen_bit; } + +bool gve_tx_work_pending_dqo(struct gve_tx_ring *tx) +{ + struct gve_index_list *miss_comp_list = &tx->dqo_compl.miss_completions; + return READ_ONCE(miss_comp_list->head) != -1; +} diff --git a/patches/timer_setup_tx_timeout_miss_path.cocci b/patches/timer_setup_tx_timeout_miss_path.cocci new file mode 100644 index 0000000..749aa0b --- /dev/null +++ b/patches/timer_setup_tx_timeout_miss_path.cocci @@ -0,0 +1,39 @@ +@ setup @ +identifier gve_tx_timeout_timer; +struct gve_priv *priv; +@@ + ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ++setup_timer(&priv->tx_timeout_timer, gve_tx_timeout_timer, ++ (unsigned long)priv); ++#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) */ +timer_setup(&priv->tx_timeout_timer, gve_tx_timeout_timer, 0); ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) */ + +@ service @ +type timer_list; +identifier gve_tx_timeout_timer, t, priv, tx_timeout_timer; +@@ + ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ++static void gve_tx_timeout_timer(unsigned long data) ++{ ++ struct gve_priv *priv = (struct gve_priv *)data; ++ int i; ++ ++ for(i = 0; i < priv->tx_cfg.num_queues; i++) { ++ if (time_after(jiffies, priv->tx[i].dqo_compl.last_processed ++ + priv->tx_timeout_period)) { ++ gve_tx_timeout_for_miss_path(priv->dev, i); ++ } ++ } ++ mod_timer(&priv->tx_timeout_timer, ++ jiffies + priv->tx_timeout_period); ++} ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0) */ +static void gve_tx_timeout_timer(timer_list *t) +{ + struct gve_priv *priv = from_timer(priv, t, tx_timeout_timer); + ... +} ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) */ From 0505d2ea2943619a3ad9c8a7045b59133adce1f1 Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Tue, 9 May 2023 15:51:23 -0700 Subject: [PATCH 17/28] gve: Remove the code of clearing PBA bit Clearing the PBA bit from the driver is race prone and it may lead to dropped interrupt events. This could potentially lead to the traffic being completely halted. 
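For context, with the PBA write gone the DQO poll path reduces to roughly the sketch below. This is an illustrative condensation, not the verbatim driver function: the helpers (gve_tx_poll_dqo(), gve_rx_poll_dqo(), gve_write_irq_doorbell_dqo()) appear in the diffs in this series, while the exact way 'priv' and 'block' are obtained is assumed here. The point is that an event latched in the PBA while the vector is masked simply stays pending and re-fires once the vector is re-armed, so nothing needs to be cleared by hand.

	static int gve_napi_poll_dqo_sketch(struct napi_struct *napi, int budget)
	{
		struct gve_notify_block *block =
			container_of(napi, struct gve_notify_block, napi);
		struct gve_priv *priv = block->priv;	/* back-pointer assumed */
		bool reschedule = false;
		int work_done = 0;

		/* No PBA clear here: a pending event latched while the vector
		 * was masked will raise the interrupt again after re-enable.
		 */
		if (block->tx)
			reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
		if (block->rx) {
			work_done = gve_rx_poll_dqo(block, budget);
			reschedule |= work_done == budget;
		}
		if (reschedule)
			return budget;

		if (likely(napi_complete_done(napi, work_done)))
			gve_write_irq_doorbell_dqo(priv, block,
						   GVE_ITR_NO_UPDATE_DQO);

		return work_done;
	}
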
Fixes: 5e8c5adf95f8 ("gve: DQO: Add core netdev features") Signed-off-by: Ziwei Xiao Signed-off-by: Bailey Forrest Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- google/gve/gve_main.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 05c4ab7..af0ffaf 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -364,19 +364,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) bool reschedule = false; int work_done = 0; - /* Clear PCI MSI-X Pending Bit Array (PBA) - * - * This bit is set if an interrupt event occurs while the vector is - * masked. If this bit is set and we reenable the interrupt, it will - * fire again. Since we're just about to poll the queue state, we don't - * need it to fire again. - * - * Under high softirq load, it's possible that the interrupt condition - * is triggered twice before we got the chance to process it. - */ - gve_write_irq_doorbell_dqo(priv, block, - GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO); - if (block->tx) reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true); From 9e38f708df5047498ef4a10a5a5442199083e1ec Mon Sep 17 00:00:00 2001 From: Coco Li Date: Mon, 22 May 2023 13:15:52 -0700 Subject: [PATCH 18/28] gve: Support IPv6 Big TCP on DQ Add support for using IPv6 Big TCP on DQ which can handle large TSO/GRO packets. See https://lwn.net/Articles/895398/. This can improve the throughput and CPU usage. Perf test result: ip -d link show $DEV gso_max_size 185000 gso_max_segs 65535 tso_max_size 262143 tso_max_segs 65535 gro_max_size 185000 For performance, tested with neper using 9k MTU on hardware that supports 200Gb/s line rate. In single streams when line rate is not saturated, we expect throughput improvements. When the networking is performing at line rate, we expect cpu usage improvements. 
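Functionally the driver change is small; the sketch below only condenses the two hunks from the diff further down (same identifiers, nothing new) to make the init-path/TX-path split explicit.

	/* Device init: on DQ, advertise a TSO ceiling above 64K so the stack
	 * may build IPv6 Big TCP super-packets for this netdev.
	 */
	if (!gve_is_gqi(priv))
		netif_set_tso_max_size(priv->dev, DQO_TX_MAX);

	/* TX path: IPv6 packets above 64K carry a Hop-by-Hop jumbogram option
	 * that the NIC does not parse; strip it before descriptors are built,
	 * and drop the skb if that fails.
	 */
	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto drop;
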
Tcp_stream (unidirectional stream test, T=thread, F=flow): skb=180kb, T=1, F=1, no zerocopy: throughput average=64576.88 Mb/s, sender stime=8.3, receiver stime=10.68 skb=64kb, T=1, F=1, no zerocopy: throughput average=64862.54 Mb/s, sender stime=9.96, receiver stime=12.67 skb=180kb, T=1, F=1, yes zerocopy: throughput average=146604.97 Mb/s, sender stime=10.61, receiver stime=5.52 skb=64kb, T=1, F=1, yes zerocopy: throughput average=131357.78 Mb/s, sender stime=12.11, receiver stime=12.25 skb=180kb, T=20, F=100, no zerocopy: throughput average=182411.37 Mb/s, sender stime=41.62, receiver stime=79.4 skb=64kb, T=20, F=100, no zerocopy: throughput average=182892.02 Mb/s, sender stime=57.39, receiver stime=72.69 skb=180kb, T=20, F=100, yes zerocopy: throughput average=182337.65 Mb/s, sender stime=27.94, receiver stime=39.7 skb=64kb, T=20, F=100, yes zerocopy: throughput average=182144.20 Mb/s, sender stime=47.06, receiver stime=39.01 Signed-off-by: Ziwei Xiao Signed-off-by: Coco Li Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230522201552.3585421-1-ziweixiao@google.com Signed-off-by: Jakub Kicinski --- google/gve/gve_main.c | 5 +++++ google/gve/gve_tx_dqo.c | 4 ++++ patches/ipv6_hopopt_jumbo_remove.cocci | 17 +++++++++++++++++ 3 files changed, 26 insertions(+) create mode 100644 patches/ipv6_hopopt_jumbo_remove.cocci diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index af0ffaf..5c0a330 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -31,6 +31,7 @@ // Minimum amount of time between queue kicks in msec (10 seconds) #define MIN_TX_TIMEOUT_GAP (1000 * 10) +#define DQO_TX_MAX 0x3FFFF const char gve_version_str[] = GVE_VERSION; static const char gve_version_prefix[] = GVE_VERSION_PREFIX; @@ -2173,6 +2174,10 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) goto err; } + /* Big TCP is only supported on DQ*/ + if (!gve_is_gqi(priv)) + netif_set_tso_max_size(priv->dev, DQO_TX_MAX); + priv->num_registered_pages = 0; priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; /* gvnic has one Notification Block per MSI-x vector, except for the diff --git a/google/gve/gve_tx_dqo.c b/google/gve/gve_tx_dqo.c index cd3779b..637caf9 100644 --- a/google/gve/gve_tx_dqo.c +++ b/google/gve/gve_tx_dqo.c @@ -8,6 +8,7 @@ #include "gve_adminq.h" #include "gve_utils.h" #include "gve_dqo.h" +#include #include #include #include @@ -646,6 +647,9 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, goto drop; } + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto drop; + num_buffer_descs = gve_num_buffer_descs_needed(skb); } else { num_buffer_descs = gve_num_buffer_descs_needed(skb); diff --git a/patches/ipv6_hopopt_jumbo_remove.cocci b/patches/ipv6_hopopt_jumbo_remove.cocci new file mode 100644 index 0000000..63cd78b --- /dev/null +++ b/patches/ipv6_hopopt_jumbo_remove.cocci @@ -0,0 +1,17 @@ +@@ +expression priv; +@@ + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,2,1)) +if (!gve_is_gqi(priv)) + netif_set_tso_max_size(priv->dev, DQO_TX_MAX); ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6,2,1) */ + +@@ +expression skb; +@@ + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,2,1)) +if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto drop; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6,2,1) */ From cc8cdf61b9463533e5241b7a7e8cdc49ae49008b Mon Sep 17 00:00:00 2001 From: Rushil Gupta Date: Tue, 8 Aug 2023 21:05:55 +0000 Subject: [PATCH 19/28] gve: Control path for DQO-QPL GVE supports QPL ("queue-page-list") mode where all data is 
communicated through a set of pre-registered pages. Adding this mode to DQO descriptor format. Add checks, abi-changes and device options to support QPL mode for DQO in addition to GQI. Also, use pages-per-qpl supplied by device-option to control the size of the "queue-page-list". Signed-off-by: Rushil Gupta Reviewed-by: Willem de Bruijn Signed-off-by: Praveen Kaligineedi Signed-off-by: Bailey Forrest --- google/gve/gve.h | 29 ++++++++++++-- google/gve/gve_adminq.c | 86 ++++++++++++++++++++++++++++++++++++----- google/gve/gve_adminq.h | 10 +++++ google/gve/gve_main.c | 22 +++++++---- 4 files changed, 126 insertions(+), 21 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index c6d4c10..8507635 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -63,6 +63,12 @@ #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 +#define DQO_QPL_DEFAULT_TX_PAGES 512 +#define DQO_QPL_DEFAULT_RX_PAGES 2048 + +/* Maximum TSO size supported on DQO */ +#define GVE_DQO_TX_MAX 0x3FFFF + /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ struct gve_rx_desc_queue { struct gve_rx_desc *desc_ring; /* the descriptor ring */ @@ -240,6 +246,9 @@ struct gve_rx_ring { /* Array of buffers for header-split */ struct gve_header_buf *hdr_bufs; + + /* qpl assigned to this queue */ + struct gve_queue_page_list *qpl; } dqo; }; @@ -485,6 +494,12 @@ struct gve_tx_ring { s16 num_pending_packets; u32 complq_mask; /* complq size is complq_mask + 1 */ + + /* QPL fields */ + struct { + /* qpl assigned to this queue */ + struct gve_queue_page_list *qpl; + }; } dqo; } ____cacheline_aligned; struct netdev_queue *netdev_txq; @@ -576,6 +591,7 @@ enum gve_queue_format { GVE_GQI_RDA_FORMAT = 0x1, GVE_GQI_QPL_FORMAT = 0x2, GVE_DQO_RDA_FORMAT = 0x3, + GVE_DQO_QPL_FORMAT = 0x4, }; struct gve_flow_spec { @@ -620,7 +636,8 @@ struct gve_priv { u16 num_event_counters; u16 tx_desc_cnt; /* num desc per ring */ u16 rx_desc_cnt; /* num desc per ring */ - u16 tx_pages_per_qpl; /* tx buffer length */ + u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */ + u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */ u16 rx_data_slot_cnt; /* rx buffer length */ u64 max_registered_pages; u64 num_registered_pages; /* num pages registered with NIC */ @@ -923,11 +940,17 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) return (priv->num_ntfy_blks / 2) + queue_idx; } +static inline bool gve_is_qpl(struct gve_priv *priv) +{ + return priv->queue_format == GVE_GQI_QPL_FORMAT || + priv->queue_format == GVE_DQO_QPL_FORMAT; +} + /* Returns the number of tx queue page lists */ static inline u32 gve_num_tx_qpls(struct gve_priv *priv) { - if (priv->queue_format != GVE_GQI_QPL_FORMAT) + if (!gve_is_qpl(priv)) return 0; return priv->tx_cfg.num_queues + priv->num_xdp_queues; @@ -947,7 +970,7 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) */ static inline u32 gve_num_rx_qpls(struct gve_priv *priv) { - if (priv->queue_format != GVE_GQI_QPL_FORMAT) + if (!gve_is_qpl(priv)) return 0; return priv->rx_cfg.num_queues; diff --git a/google/gve/gve_adminq.c b/google/gve/gve_adminq.c index 8148518..769a936 100644 --- a/google/gve/gve_adminq.c +++ b/google/gve/gve_adminq.c @@ -41,7 +41,8 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_dqo_rda **dev_op_dqo_rda, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, - struct gve_device_option_flow_steering 
**dev_op_flow_steering) + struct gve_device_option_flow_steering **dev_op_flow_steering, + struct gve_device_option_dqo_qpl **dev_op_dqo_qpl) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); u16 option_length = be16_to_cpu(option->option_length); @@ -114,6 +115,22 @@ void gve_parse_device_option(struct gve_priv *priv, } *dev_op_dqo_rda = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_DQO_QPL: + if (option_length < sizeof(**dev_op_dqo_qpl) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "DQO QPL", (int)sizeof(**dev_op_dqo_qpl), + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_dqo_qpl)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL"); + } + *dev_op_dqo_qpl = (void *)(option + 1); + break; case GVE_DEV_OPT_ID_JUMBO_FRAMES: if (option_length < sizeof(**dev_op_jumbo_frames) || req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) { @@ -188,7 +205,8 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_dqo_rda **dev_op_dqo_rda, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, - struct gve_device_option_flow_steering **dev_op_flow_steering) + struct gve_device_option_flow_steering **dev_op_flow_steering, + struct gve_device_option_dqo_qpl **dev_op_dqo_qpl) { const int num_options = be16_to_cpu(descriptor->num_device_options); struct gve_device_option *dev_opt; @@ -209,7 +227,8 @@ gve_process_device_options(struct gve_priv *priv, gve_parse_device_option(priv, descriptor, dev_opt, dev_op_gqi_rda, dev_op_gqi_qpl, dev_op_dqo_rda, dev_op_jumbo_frames, - dev_op_buffer_sizes, dev_op_flow_steering); + dev_op_buffer_sizes, dev_op_flow_steering, + dev_op_dqo_qpl); dev_opt = next_opt; } @@ -590,12 +609,24 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); } else { + u16 comp_ring_size; + u32 qpl_id = 0; + + if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + qpl_id = GVE_RAW_ADDRESSING_QPL_ID; + comp_ring_size = + priv->options_dqo_rda.tx_comp_ring_entries; + } else { + qpl_id = tx->dqo.qpl->id; + comp_ring_size = priv->tx_desc_cnt; + } + cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_tx_queue.tx_ring_size = cpu_to_be16(priv->tx_desc_cnt); cmd.create_tx_queue.tx_comp_ring_addr = cpu_to_be64(tx->complq_bus_dqo); cmd.create_tx_queue.tx_comp_ring_size = - cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries); + cpu_to_be16(comp_ring_size); } return gve_adminq_issue_cmd(priv, &cmd); @@ -640,6 +671,18 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); } else { + u16 rx_buff_ring_entries; + u32 qpl_id = 0; + + if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + qpl_id = GVE_RAW_ADDRESSING_QPL_ID; + rx_buff_ring_entries = + priv->options_dqo_rda.rx_buff_ring_entries; + } else { + qpl_id = rx->dqo.qpl->id; + rx_buff_ring_entries = priv->rx_desc_cnt; + } + cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_rx_queue.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt); cmd.create_rx_queue.rx_desc_ring_addr = @@ -649,7 +692,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) 
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(priv->data_buffer_size_dqo); cmd.create_rx_queue.rx_buff_ring_size = - cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries); + cpu_to_be16(rx_buff_ring_entries); cmd.create_rx_queue.enable_rsc = !!(priv->dev->features & NETIF_F_LRO); if (gve_get_enable_header_split(priv)) @@ -763,9 +806,13 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv, const struct gve_device_option_dqo_rda *dev_op_dqo_rda) { priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); + priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); + + if (priv->queue_format == GVE_DQO_QPL_FORMAT) + return 0; + priv->options_dqo_rda.tx_comp_ring_entries = be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries); - priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); priv->options_dqo_rda.rx_buff_ring_entries = be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries); @@ -777,7 +824,8 @@ static void gve_enable_supported_features( u32 supported_features_mask, const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames, const struct gve_device_option_buffer_sizes *dev_op_buffer_sizes, - const struct gve_device_option_flow_steering *dev_op_flow_steering) + const struct gve_device_option_flow_steering *dev_op_flow_steering, + const struct gve_device_option_dqo_qpl *dev_op_dqo_qpl) { int buf_size; @@ -832,6 +880,17 @@ static void gve_enable_supported_features( be16_to_cpu(dev_op_flow_steering->max_num_rules); } + /* Override pages for qpl for DQO-QPL */ + if (dev_op_dqo_qpl) { + priv->tx_pages_per_qpl = + be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl); + priv->rx_pages_per_qpl = + be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl); + if (priv->tx_pages_per_qpl == 0) + priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES; + if (priv->rx_pages_per_qpl == 0) + priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES; + } } int gve_adminq_describe_device(struct gve_priv *priv) @@ -842,6 +901,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL; struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL; + struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL; struct gve_device_descriptor *descriptor; u32 supported_features_mask = 0; union gve_adminq_command cmd; @@ -870,13 +930,14 @@ int gve_adminq_describe_device(struct gve_priv *priv) &dev_op_gqi_qpl, &dev_op_dqo_rda, &dev_op_jumbo_frames, &dev_op_buffer_sizes, - &dev_op_flow_steering); + &dev_op_flow_steering, + &dev_op_dqo_qpl); if (err) goto free_device_descriptor; /* If the GQI_RAW_ADDRESSING option is not enabled and the queue format * is not set to GqiRda, choose the queue format in a priority order: - * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default. + * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default. 
*/ if (dev_op_dqo_rda) { priv->queue_format = GVE_DQO_RDA_FORMAT; @@ -884,6 +945,10 @@ int gve_adminq_describe_device(struct gve_priv *priv) "Driver is running with DQO RDA queue format.\n"); supported_features_mask = be32_to_cpu(dev_op_dqo_rda->supported_features_mask); + } else if (dev_op_dqo_qpl) { + priv->queue_format = GVE_DQO_QPL_FORMAT; + supported_features_mask = + be32_to_cpu(dev_op_dqo_qpl->supported_features_mask); } else if (dev_op_gqi_rda) { priv->queue_format = GVE_GQI_RDA_FORMAT; dev_info(&priv->pdev->dev, @@ -938,7 +1003,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) gve_enable_supported_features(priv, supported_features_mask, dev_op_jumbo_frames, dev_op_buffer_sizes, - dev_op_flow_steering); + dev_op_flow_steering, + dev_op_dqo_qpl); free_device_descriptor: dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor, diff --git a/google/gve/gve_adminq.h b/google/gve/gve_adminq.h index 44e2483..2c69410 100644 --- a/google/gve/gve_adminq.h +++ b/google/gve/gve_adminq.h @@ -115,6 +115,14 @@ struct gve_device_option_dqo_rda { static_assert(sizeof(struct gve_device_option_dqo_rda) == 8); +struct gve_device_option_dqo_qpl { + __be32 supported_features_mask; + __be16 tx_pages_per_qpl; + __be16 rx_pages_per_qpl; +}; + +static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8); + struct gve_device_option_jumbo_frames { __be32 supported_features_mask; __be16 max_mtu; @@ -152,6 +160,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_GQI_RDA = 0x2, GVE_DEV_OPT_ID_GQI_QPL = 0x3, GVE_DEV_OPT_ID_DQO_RDA = 0x4, + GVE_DEV_OPT_ID_DQO_QPL = 0x7, GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, GVE_DEV_OPT_ID_FLOW_STEERING = 0xb, @@ -164,6 +173,7 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0, }; diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 5c0a330..84304eb 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -31,7 +31,6 @@ // Minimum amount of time between queue kicks in msec (10 seconds) #define MIN_TX_TIMEOUT_GAP (1000 * 10) -#define DQO_TX_MAX 0x3FFFF const char gve_version_str[] = GVE_VERSION; static const char gve_version_prefix[] = GVE_VERSION_PREFIX; @@ -566,7 +565,7 @@ static int gve_setup_device_resources(struct gve_priv *priv) goto abort_with_stats_report; } - if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + if (!gve_is_gqi(priv)) { priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), GFP_KERNEL); if (!priv->ptype_lut_dqo) { @@ -1160,11 +1159,12 @@ static int gve_alloc_xdp_qpls(struct gve_priv *priv) static int gve_alloc_qpls(struct gve_priv *priv) { int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; + int page_count; int start_id; int i, j; int err; - if (priv->queue_format != GVE_GQI_QPL_FORMAT) + if (!gve_is_qpl(priv)) return 0; priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL); @@ -1172,17 +1172,23 @@ static int gve_alloc_qpls(struct gve_priv *priv) return -ENOMEM; start_id = gve_tx_start_qpl_id(priv); + page_count = priv->tx_pages_per_qpl; for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { - err = gve_alloc_queue_page_list(priv, i, - priv->tx_pages_per_qpl); + err = gve_alloc_queue_page_list(priv, i, page_count); if (err) goto free_qpls; } start_id = gve_rx_start_qpl_id(priv); + + /* For GQI_QPL number of pages allocated have 1:1 relationship with + * number 
of descriptors. For DQO, number of pages required are + * more than descriptors (because of out of order completions). + */ + page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ? + priv->rx_data_slot_cnt : priv->rx_pages_per_qpl; for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { - err = gve_alloc_queue_page_list(priv, i, - priv->rx_data_slot_cnt); + err = gve_alloc_queue_page_list(priv, i, page_count); if (err) goto free_qpls; } @@ -2176,7 +2182,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) /* Big TCP is only supported on DQ*/ if (!gve_is_gqi(priv)) - netif_set_tso_max_size(priv->dev, DQO_TX_MAX); + netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); priv->num_registered_pages = 0; priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; From adf26f34bbe602cef13aae993b757129afcb8090 Mon Sep 17 00:00:00 2001 From: Rushil Gupta Date: Tue, 8 Aug 2023 21:08:45 +0000 Subject: [PATCH 20/28] gve: Tx path for QPL Each QPL page is divided into GVE_TX_BUFS_PER_PAGE_DQO buffers. When a packet needs to be transmitted, we break the packet into max GVE_TX_BUF_SIZE_DQO sized chunks and transmit each chunk using a TX descriptor. We allocate the TX buffers from the free list in dqo_tx. We store these TX buffer indices in an array in the pending_packet structure. The TX buffers are returned to the free list in dqo_compl after receiving packet completion or when removing packets from miss completions list. Signed-off-by: Rushil Gupta Reviewed-by: Willem de Bruijn Signed-off-by: Praveen Kaligineedi Signed-off-by: Bailey Forrest --- google/gve/gve.h | 77 +++++++- google/gve/gve_tx_dqo.c | 404 ++++++++++++++++++++++++++++++++-------- 2 files changed, 398 insertions(+), 83 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index 8507635..0c57c84 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -69,6 +69,20 @@ /* Maximum TSO size supported on DQO */ #define GVE_DQO_TX_MAX 0x3FFFF +#define GVE_TX_BUF_SHIFT_DQO 11 + +/* 2K buffers for DQO-QPL */ +#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO) +#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO) +#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO)) + +/* If number of free/recyclable buffers are less than this threshold; driver + * allocs and uses a non-qpl page on the receive path of DQO QPL to free + * up buffers. + * Value is set big enough to post at least 3 64K LRO packet via 2K buffer to NIC. + */ +#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96 + /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ struct gve_rx_desc_queue { struct gve_rx_desc *desc_ring; /* the descriptor ring */ @@ -365,8 +379,14 @@ struct gve_tx_pending_packet_dqo { * All others correspond to `skb`'s frags and should be unmapped with * `dma_unmap_page`. */ - DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); - DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); + union { + struct { + DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); + DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); + }; + s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT]; + }; + u16 num_bufs; /* Linked list index to next element in the list, or -1 if none */ @@ -421,6 +441,32 @@ struct gve_tx_ring { * set. */ u32 last_re_idx; + + /* free running number of packet buf descriptors posted */ + u16 posted_packet_desc_cnt; + /* free running number of packet buf descriptors completed */ + u16 completed_packet_desc_cnt; + + /* QPL fields */ + struct { + /* Linked list of gve_tx_buf_dqo. 
Index into + * tx_qpl_buf_next, or -1 if empty. + * + * This is a consumer list owned by the TX path. When it + * runs out, the producer list is stolen from the + * completion handling path + * (dqo_compl.free_tx_qpl_buf_head). + */ + s16 free_tx_qpl_buf_head; + + /* Free running count of the number of QPL tx buffers + * allocated + */ + u32 alloc_tx_qpl_buf_cnt; + + /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */ + u32 free_tx_qpl_buf_cnt; + }; } dqo_tx; }; @@ -468,6 +514,24 @@ struct gve_tx_ring { * reached a specified timeout. */ struct gve_index_list timed_out_completions; + + /* QPL fields */ + struct { + /* Linked list of gve_tx_buf_dqo. Index into + * tx_qpl_buf_next, or -1 if empty. + * + * This is the producer list, owned by the completion + * handling path. When the consumer list + * (dqo_tx.free_tx_qpl_buf_head) is runs out, this list + * will be stolen. + */ + atomic_t free_tx_qpl_buf_head; + + /* Free running count of the number of tx buffers + * freed + */ + atomic_t free_tx_qpl_buf_cnt; + }; } dqo_compl; } ____cacheline_aligned; u64 pkt_done; /* free-running - total packets completed */ @@ -499,6 +563,15 @@ struct gve_tx_ring { struct { /* qpl assigned to this queue */ struct gve_queue_page_list *qpl; + + /* Each QPL page is divided into TX bounce buffers + * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is + * an array to manage linked lists of TX buffers. + * An entry j at index i implies that j'th buffer + * is next on the list after i + */ + s16 *tx_qpl_buf_next; + u32 num_tx_qpl_bufs; }; } dqo; } ____cacheline_aligned; diff --git a/google/gve/gve_tx_dqo.c b/google/gve/gve_tx_dqo.c index 637caf9..d87de61 100644 --- a/google/gve/gve_tx_dqo.c +++ b/google/gve/gve_tx_dqo.c @@ -13,6 +13,89 @@ #include #include +/* Returns true if tx_bufs are available. */ +static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count) +{ + int num_avail; + + if (!tx->dqo.qpl) + return true; + + num_avail = tx->dqo.num_tx_qpl_bufs - + (tx->dqo_tx.alloc_tx_qpl_buf_cnt - + tx->dqo_tx.free_tx_qpl_buf_cnt); + + if (count <= num_avail) + return true; + + /* Update cached value from dqo_compl. */ + tx->dqo_tx.free_tx_qpl_buf_cnt = + atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt); + + num_avail = tx->dqo.num_tx_qpl_bufs - + (tx->dqo_tx.alloc_tx_qpl_buf_cnt - + tx->dqo_tx.free_tx_qpl_buf_cnt); + + return count <= num_avail; +} + +static s16 +gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx) +{ + s16 index; + + index = tx->dqo_tx.free_tx_qpl_buf_head; + + /* No TX buffers available, try to steal the list from the + * completion handler. 
+ */ + if (unlikely(index == -1)) { + tx->dqo_tx.free_tx_qpl_buf_head = + atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1); + index = tx->dqo_tx.free_tx_qpl_buf_head; + + if (unlikely(index == -1)) + return index; + } + + /* Remove TX buf from free list */ + tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index]; + + return index; +} + +static void +gve_free_tx_qpl_bufs(struct gve_tx_ring *tx, + struct gve_tx_pending_packet_dqo *pkt) +{ + s16 index; + int i; + + if (!pkt->num_bufs) + return; + + index = pkt->tx_qpl_buf_ids[0]; + /* Create a linked list of buffers to be added to the free list */ + for (i = 1; i < pkt->num_bufs; i++) { + tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i]; + index = pkt->tx_qpl_buf_ids[i]; + } + + while (true) { + s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head); + + tx->dqo.tx_qpl_buf_next[index] = old_head; + if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head, + old_head, + pkt->tx_qpl_buf_ids[0]) == old_head) { + break; + } + } + + atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt); + pkt->num_bufs = 0; +} + /* Returns true if a gve_tx_pending_packet_dqo object is available. */ static bool gve_has_pending_packet(struct gve_tx_ring *tx) { @@ -136,9 +219,40 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx) kvfree(tx->dqo.pending_packets); tx->dqo.pending_packets = NULL; + kvfree(tx->dqo.tx_qpl_buf_next); + tx->dqo.tx_qpl_buf_next = NULL; + + if (tx->dqo.qpl) { + gve_unassign_qpl(priv, tx->dqo.qpl->id); + tx->dqo.qpl = NULL; + } + netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); } +static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx) +{ + int num_tx_qpl_bufs = GVE_TX_BUFS_PER_PAGE_DQO * + tx->dqo.qpl->num_entries; + int i; + + tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs, + sizeof(tx->dqo.tx_qpl_buf_next[0]), + GFP_KERNEL); + if (!tx->dqo.tx_qpl_buf_next) + return -ENOMEM; + + tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs; + + /* Generate free TX buf list */ + for (i = 0; i < num_tx_qpl_bufs - 1; i++) + tx->dqo.tx_qpl_buf_next[i] = i + 1; + tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1; + + atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1); + return 0; +} + static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) { struct gve_tx_ring *tx = &priv->tx[idx]; @@ -155,7 +269,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) /* Queue sizes must be a power of 2 */ tx->mask = priv->tx_desc_cnt - 1; - tx->dqo.complq_mask = priv->options_dqo_rda.tx_comp_ring_entries - 1; + tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ? + priv->options_dqo_rda.tx_comp_ring_entries - 1 : + tx->mask; /* The max number of pending packets determines the maximum number of * descriptors which maybe written to the completion queue. @@ -211,6 +327,15 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) if (!tx->q_resources) goto err; + if (gve_is_qpl(priv)) { + tx->dqo.qpl = gve_assign_tx_qpl(priv, idx); + if (!tx->dqo.qpl) + goto err; + + if (gve_tx_qpl_buf_init(tx)) + goto err; + } + gve_tx_add_to_block(priv, idx); return 0; @@ -267,20 +392,27 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx) return tx->mask - num_used; } +static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx, + int desc_count, int buf_count) +{ + return gve_has_pending_packet(tx) && + num_avail_tx_slots(tx) >= desc_count && + gve_has_free_tx_qpl_bufs(tx, buf_count); +} + /* Stops the queue if available descriptors is less than 'count'. 
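 * With DQO-QPL the queue is also stopped when fewer than 'buf_count' free TX bounce buffers remain (see gve_has_free_tx_qpl_bufs() above).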
* Return: 0 if stop is not required. */ -static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count) +static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, + int desc_count, int buf_count) { - if (likely(gve_has_pending_packet(tx) && - num_avail_tx_slots(tx) >= count)) + if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) return 0; /* Update cached TX head pointer */ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); - if (likely(gve_has_pending_packet(tx) && - num_avail_tx_slots(tx) >= count)) + if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) return 0; /* No space, so stop the queue */ @@ -295,8 +427,7 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count) */ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); - if (likely(!gve_has_pending_packet(tx) || - num_avail_tx_slots(tx) < count)) + if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) return -EBUSY; netif_tx_start_queue(tx->netdev_txq); @@ -444,44 +575,16 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc, }; } -/* Returns 0 on success, or < 0 on error. - * - * Before this function is called, the caller must ensure - * gve_has_pending_packet(tx) returns true. - */ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, - struct sk_buff *skb) + struct sk_buff *skb, + struct gve_tx_pending_packet_dqo *pkt, + s16 completion_tag, + u32 *desc_idx, + bool is_gso) { const struct skb_shared_info *shinfo = skb_shinfo(skb); - const bool is_gso = skb_is_gso(skb); - u32 desc_idx = tx->dqo_tx.tail; - - struct gve_tx_pending_packet_dqo *pkt; - struct gve_tx_metadata_dqo metadata; - s16 completion_tag; int i; - pkt = gve_alloc_pending_packet(tx); - pkt->skb = skb; - pkt->num_bufs = 0; - completion_tag = pkt - tx->dqo.pending_packets; - - gve_extract_tx_metadata_dqo(skb, &metadata); - if (is_gso) { - int header_len = gve_prep_tso(skb); - - if (unlikely(header_len < 0)) - goto err; - - gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, - skb, &metadata, header_len); - desc_idx = (desc_idx + 1) & tx->mask; - } - - gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, - &metadata); - desc_idx = (desc_idx + 1) & tx->mask; - /* Note: HW requires that the size of a non-TSO packet be within the * range of [17, 9728]. * @@ -490,6 +593,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, * - Hypervisor won't allow MTU larger than 9216. 
*/ + pkt->num_bufs = 0; /* Map the linear portion of skb */ { u32 len = skb_headlen(skb); @@ -503,7 +607,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); ++pkt->num_bufs; - gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, completion_tag, /*eop=*/shinfo->nr_frags == 0, is_gso); } @@ -522,10 +626,139 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); ++pkt->num_bufs; - gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, completion_tag, is_eop, is_gso); } + return 0; +err: + for (i = 0; i < pkt->num_bufs; i++) { + if (i == 0) { + dma_unmap_single(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); + } else { + dma_unmap_page(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); + } + } + pkt->num_bufs = 0; + return -1; +} + +/* Tx buffer i corresponds to + * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO + * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO + */ +static void gve_tx_buf_get_addr(struct gve_tx_ring *tx, + s16 index, + void **va, dma_addr_t *dma_addr) +{ + int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO); + int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO; + + *va = page_address(tx->dqo.qpl->pages[page_id]) + offset; + *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset; +} + +static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx, + struct sk_buff *skb, + struct gve_tx_pending_packet_dqo *pkt, + s16 completion_tag, + u32 *desc_idx, + bool is_gso) +{ + u32 copy_offset = 0; + dma_addr_t dma_addr; + u32 copy_len; + s16 index; + void *va; + + /* Break the packet into buffer size chunks */ + pkt->num_bufs = 0; + while (copy_offset < skb->len) { + index = gve_alloc_tx_qpl_buf(tx); + if (unlikely(index == -1)) + goto err; + + gve_tx_buf_get_addr(tx, index, &va, &dma_addr); + copy_len = min_t(u32, GVE_TX_BUF_SIZE_DQO, + skb->len - copy_offset); + skb_copy_bits(skb, copy_offset, va, copy_len); + + copy_offset += copy_len; + dma_sync_single_for_device(tx->dev, dma_addr, + copy_len, DMA_TO_DEVICE); + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, + copy_len, + dma_addr, + completion_tag, + copy_offset == skb->len, + is_gso); + + pkt->tx_qpl_buf_ids[pkt->num_bufs] = index; + ++tx->dqo_tx.alloc_tx_qpl_buf_cnt; + ++pkt->num_bufs; + } + + return 0; +err: + /* Should not be here if gve_has_free_tx_qpl_bufs() check is correct */ + gve_free_tx_qpl_bufs(tx, pkt); + return -ENOMEM; +} + +/* Returns 0 on success, or < 0 on error. + * + * Before this function is called, the caller must ensure + * gve_has_pending_packet(tx) returns true. 
+ */ +static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + const bool is_gso = skb_is_gso(skb); + u32 desc_idx = tx->dqo_tx.tail; + struct gve_tx_pending_packet_dqo *pkt; + struct gve_tx_metadata_dqo metadata; + s16 completion_tag; + + pkt = gve_alloc_pending_packet(tx); + pkt->skb = skb; + completion_tag = pkt - tx->dqo.pending_packets; + + gve_extract_tx_metadata_dqo(skb, &metadata); + if (is_gso) { + int header_len = gve_prep_tso(skb); + + if (unlikely(header_len < 0)) + goto err; + + gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, + skb, &metadata, header_len); + desc_idx = (desc_idx + 1) & tx->mask; + } + + gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, + &metadata); + desc_idx = (desc_idx + 1) & tx->mask; + + if (tx->dqo.qpl) { + if (gve_tx_add_skb_copy_dqo(tx, skb, pkt, + completion_tag, + &desc_idx, is_gso)) + goto err; + } else { + if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt, + completion_tag, + &desc_idx, is_gso)) + goto err; + } + + tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs; + /* Commit the changes to our state */ tx->dqo_tx.tail = desc_idx; @@ -547,22 +780,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, return 0; err: - for (i = 0; i < pkt->num_bufs; i++) { - if (i == 0) { - dma_unmap_single(tx->dev, - dma_unmap_addr(pkt, dma[i]), - dma_unmap_len(pkt, len[i]), - DMA_TO_DEVICE); - } else { - dma_unmap_page(tx->dev, - dma_unmap_addr(pkt, dma[i]), - dma_unmap_len(pkt, len[i]), - DMA_TO_DEVICE); - } - } - pkt->skb = NULL; - pkt->num_bufs = 0; gve_free_pending_packet(tx, pkt); return -1; @@ -636,40 +854,56 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, int num_buffer_descs; int total_num_descs; - if (skb_is_gso(skb)) { - /* If TSO doesn't meet HW requirements, attempt to linearize the - * packet. - */ - if (unlikely(!gve_can_send_tso(skb) && - skb_linearize(skb) < 0)) { - net_err_ratelimited("%s: Failed to transmit TSO packet\n", - priv->dev->name); - goto drop; - } - - if (unlikely(ipv6_hopopt_jumbo_remove(skb))) - goto drop; + if (tx->dqo.qpl) { + if (skb_is_gso(skb)) + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto drop; - num_buffer_descs = gve_num_buffer_descs_needed(skb); + /* We do not need to verify the number of buffers used per + * packet or per segment in case of TSO as with 2K size buffers + * none of the TX packet rules would be violated. + * + * gve_can_send_tso() checks that each TCP segment of gso_size is + * not distributed over more than 9 SKB frags.. + */ + num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO); } else { - num_buffer_descs = gve_num_buffer_descs_needed(skb); + if (skb_is_gso(skb)) { + /* If TSO doesn't meet HW requirements, attempt to linearize the + * packet. + */ + if (unlikely(!gve_can_send_tso(skb) && + skb_linearize(skb) < 0)) { + net_err_ratelimited("%s: Failed to transmit TSO packet\n", + priv->dev->name); + goto drop; + } - if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) { - if (unlikely(skb_linearize(skb) < 0)) + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) goto drop; - num_buffer_descs = 1; + num_buffer_descs = gve_num_buffer_descs_needed(skb); + } else { + num_buffer_descs = gve_num_buffer_descs_needed(skb); + + if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) { + if (unlikely(skb_linearize(skb) < 0)) + goto drop; + + num_buffer_descs = 1; + } } } /* Metadata + (optional TSO) + data descriptors. 
*/ total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs; if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs + - GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP))) { + GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP, + num_buffer_descs))) { return -1; } - if (unlikely(gve_tx_add_skb_no_copy_dqo(tx, skb) < 0)) + if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0)) goto drop; netdev_tx_sent_queue(tx->netdev_txq, skb->len); @@ -817,7 +1051,11 @@ static void gve_handle_packet_completion(struct gve_priv *priv, return; } } - gve_unmap_packet(tx->dev, pending_packet); + tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs; + if (tx->dqo.qpl) + gve_free_tx_qpl_bufs(tx, pending_packet); + else + gve_unmap_packet(tx->dev, pending_packet); *bytes += pending_packet->skb->len; (*pkts)++; @@ -875,12 +1113,16 @@ static void remove_miss_completions(struct gve_priv *priv, remove_from_list(tx, &tx->dqo_compl.miss_completions, pending_packet); - /* Unmap buffers and free skb but do not unallocate packet i.e. + /* Unmap/free TX buffers and free skb but do not unallocate packet i.e. * the completion tag is not freed to ensure that the driver * can take appropriate action if a corresponding valid * completion is received later. */ - gve_unmap_packet(tx->dev, pending_packet); + if (tx->dqo.qpl) + gve_free_tx_qpl_bufs(tx, pending_packet); + else + gve_unmap_packet(tx->dev, pending_packet); + /* This indicates the packet was dropped. */ dev_kfree_skb_any(pending_packet->skb); pending_packet->skb = NULL; From cfdda78b13b53de8cc2a53af760250a60aa72165 Mon Sep 17 00:00:00 2001 From: Rushil Gupta Date: Tue, 8 Aug 2023 21:40:11 +0000 Subject: [PATCH 21/28] gve: RX path for DQO-QPL The RX path allocates the QPL page pool at queue creation, and tries to reuse these pages through page recycling. This patch ensures that on refill no non-QPL pages are posted to the device. When the driver is running low on free buffers, an ondemand allocation step kicks in that allocates a non-qpl page for SKB business to free up the QPL page in use. gve_try_recycle_buf was moved to gve_rx_append_frags so that driver does not attempt to mark buffer as used if a non-qpl page was allocated ondemand. 
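As a rough sketch (not the driver's literal code), the fallback condition implemented by the new gve_rx_should_trigger_copy_ondemand() helper reduces to the check below. It only applies to the QPL queue format, and GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD is defined in gve.h outside this patch, so it is taken as a parameter here:

    /* Sketch: copy on demand once nearly every QPL buffer state is parked
     * on the used list, i.e. still pinned by in-flight skbs.
     * qpl_threshold stands in for GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD.
     */
    static bool rx_low_on_qpl_bufs(unsigned int used_buf_states_cnt,
                                   unsigned int num_buf_states,
                                   unsigned int qpl_threshold)
    {
            return used_buf_states_cnt >= num_buf_states - qpl_threshold;
    }

When this condition holds, the payload is copied into a page obtained from alloc_page() and attached to the skb in place of the QPL page, so the QPL buffer state can be recycled back to the device at the cost of an extra allocation and memcpy on this slow path.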
Signed-off-by: Rushil Gupta Reviewed-by: Willem de Bruijn Signed-off-by: Praveen Kaligineedi Signed-off-by: Bailey Forrest --- google/gve/gve.h | 6 ++ google/gve/gve_rx_dqo.c | 128 +++++++++++++++++++++---- patches/ipv6_hopopt_jumbo_remove.cocci | 2 +- 3 files changed, 117 insertions(+), 19 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index 0c57c84..43e6a88 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -263,6 +263,12 @@ struct gve_rx_ring { /* qpl assigned to this queue */ struct gve_queue_page_list *qpl; + + /* index into queue page list */ + u32 next_qpl_page_idx; + + /* track number of used buffers */ + u16 used_buf_states_cnt; } dqo; }; diff --git a/google/gve/gve_rx_dqo.c b/google/gve/gve_rx_dqo.c index f18a35a..62f3158 100644 --- a/google/gve/gve_rx_dqo.c +++ b/google/gve/gve_rx_dqo.c @@ -22,11 +22,13 @@ static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs) } static void gve_free_page_dqo(struct gve_priv *priv, - struct gve_rx_buf_state_dqo *bs) + struct gve_rx_buf_state_dqo *bs, + bool free_page) { page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1); - gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr, - DMA_FROM_DEVICE); + if (free_page) + gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr, + DMA_FROM_DEVICE); bs->page_info.page = NULL; } @@ -137,12 +139,20 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx) */ for (i = 0; i < 5; i++) { buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); - if (gve_buf_ref_cnt(buf_state) == 0) + if (gve_buf_ref_cnt(buf_state) == 0) { + rx->dqo.used_buf_states_cnt--; return buf_state; + } gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); } + /* For QPL, we cannot allocate any new buffers and must + * wait for the existing ones to be available. + */ + if (rx->dqo.qpl) + return NULL; + /* If there are no free buf states discard an entry from * `used_buf_states` so it can be used. */ @@ -151,23 +161,39 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx) if (gve_buf_ref_cnt(buf_state) == 0) return buf_state; - gve_free_page_dqo(rx->gve, buf_state); + gve_free_page_dqo(rx->gve, buf_state, true); gve_free_buf_state(rx, buf_state); } return NULL; } -static int gve_alloc_page_dqo(struct gve_priv *priv, +static int gve_alloc_page_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_state_dqo *buf_state) { - int err; + struct gve_priv *priv = rx->gve; + u32 idx; - err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page, - &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC); - if (err) - return err; + if (!rx->dqo.qpl) { + int err; + err = gve_alloc_page(priv, &priv->pdev->dev, + &buf_state->page_info.page, + &buf_state->addr, + DMA_FROM_DEVICE, GFP_ATOMIC); + if (err) + return err; + } else { + idx = rx->dqo.next_qpl_page_idx; + if (idx >= priv->rx_pages_per_qpl) { + net_err_ratelimited("%s: Out of QPL pages\n", + priv->dev->name); + return -ENOMEM; + } + buf_state->page_info.page = rx->dqo.qpl->pages[idx]; + buf_state->addr = rx->dqo.qpl->page_buses[idx]; + rx->dqo.next_qpl_page_idx++; + } buf_state->page_info.page_offset = 0; buf_state->page_info.page_address = page_address(buf_state->page_info.page); @@ -202,9 +228,13 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) for (i = 0; i < rx->dqo.num_buf_states; i++) { struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; - + /* Only free page for RDA. QPL pages are freed in gve_main. 
*/ if (bs->page_info.page) - gve_free_page_dqo(priv, bs); + gve_free_page_dqo(priv, bs, !rx->dqo.qpl); + } + if (rx->dqo.qpl) { + gve_unassign_qpl(priv, rx->dqo.qpl->id); + rx->dqo.qpl = NULL; } if (rx->dqo.bufq.desc_ring) { @@ -246,7 +276,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) int i; const u32 buffer_queue_slots = - priv->options_dqo_rda.rx_buff_ring_entries; + priv->queue_format == GVE_DQO_RDA_FORMAT ? + priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt; const u32 completion_queue_slots = priv->rx_desc_cnt; netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); @@ -260,7 +291,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) rx->ctx.skb_head = NULL; rx->ctx.skb_tail = NULL; - rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4); + rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ? + min_t(s16, S16_MAX, buffer_queue_slots * 4) : + priv->rx_pages_per_qpl; + rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, sizeof(rx->dqo.buf_states[0]), GFP_KERNEL); @@ -309,6 +343,13 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) if (!rx->dqo.bufq.desc_ring) goto err; + if (priv->queue_format != GVE_DQO_RDA_FORMAT) { + rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num); + if (!rx->dqo.qpl) + goto err; + rx->dqo.next_qpl_page_idx = 0; + } + rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources), &rx->q_resources_bus, GFP_KERNEL); if (!rx->q_resources) @@ -400,7 +441,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) if (unlikely(!buf_state)) break; - if (unlikely(gve_alloc_page_dqo(priv, buf_state))) { + if (unlikely(gve_alloc_page_dqo(rx, buf_state))) { u64_stats_update_begin(&rx->statss); rx->rx_buf_alloc_fail++; u64_stats_update_end(&rx->statss); @@ -469,6 +510,7 @@ static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, mark_used: gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); + rx->dqo.used_buf_states_cnt++; } static void gve_rx_skb_csum(struct sk_buff *skb, @@ -529,6 +571,42 @@ static void gve_rx_free_skb(struct gve_rx_ring *rx) rx->ctx.skb_tail = NULL; } +static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx) +{ + if (!rx->dqo.qpl) + return false; + if (rx->dqo.used_buf_states_cnt < + (rx->dqo.num_buf_states - + GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD)) + return false; + return true; +} + +static int gve_rx_copy_ondemand(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state, + u16 buf_len) +{ + struct page *page = alloc_page(GFP_ATOMIC); + int num_frags; + + if (!page) + return -ENOMEM; + + memcpy(page_address(page), + buf_state->page_info.page_address + + buf_state->page_info.page_offset, + buf_len); + num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, + 0, buf_len, PAGE_SIZE); + + u64_stats_update_begin(&rx->statss); + rx->rx_frag_alloc_cnt++; + u64_stats_update_end(&rx->statss); + gve_recycle_buf(rx, buf_state); + return 0; +} + /* Chains multi skbs for single rx packet. * Returns 0 if buffer is appended, -1 otherwise. 
*/ @@ -556,12 +634,20 @@ static int gve_rx_append_frags(struct napi_struct *napi, rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo; } + /* Trigger ondemand page allocation if we are running low on buffers */ + if (gve_rx_should_trigger_copy_ondemand(rx)) + return gve_rx_copy_ondemand(rx, buf_state, buf_len); + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, buf_state->page_info.page, buf_state->page_info.page_offset, buf_len, priv->data_buffer_size_dqo); gve_dec_pagecnt_bias(&buf_state->page_info); + /* Advances buffer page-offset if page is partially used. + * Marks buffer as used if page is full. + */ + gve_try_recycle_buf(priv, rx, buf_state); return 0; } @@ -655,7 +741,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, priv)) != 0) goto error; - gve_try_recycle_buf(priv, rx, buf_state); return 0; } @@ -680,6 +765,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, goto error; rx->ctx.skb_tail = rx->ctx.skb_head; + if (gve_rx_should_trigger_copy_ondemand(rx)) { + if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0) + goto error; + return 0; + } + skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, buf_state->page_info.page_offset, buf_len, priv->data_buffer_size_dqo); @@ -689,7 +780,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; error: - dev_err(&priv->pdev->dev, "%s: Error return", priv->dev->name); + net_err_ratelimited("%s: Error returning from Rx DQO\n", + priv->dev->name); gve_recycle_buf(rx, buf_state); return -ENOMEM; } diff --git a/patches/ipv6_hopopt_jumbo_remove.cocci b/patches/ipv6_hopopt_jumbo_remove.cocci index 63cd78b..05632e8 100644 --- a/patches/ipv6_hopopt_jumbo_remove.cocci +++ b/patches/ipv6_hopopt_jumbo_remove.cocci @@ -4,7 +4,7 @@ expression priv; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,2,1)) if (!gve_is_gqi(priv)) - netif_set_tso_max_size(priv->dev, DQO_TX_MAX); + netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6,2,1) */ @@ From f43ded1aab97aaf202f66151a3a607d567ec0d7e Mon Sep 17 00:00:00 2001 From: Rushil Gupta Date: Tue, 8 Aug 2023 23:20:59 +0000 Subject: [PATCH 22/28] gve: Bump up GVE_VERSION to v1.4.0rc3 --- google/gve/gve_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 84304eb..51dbaad 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -26,7 +26,7 @@ #define GVE_DEFAULT_RX_COPYBREAK (256) #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK) -#define GVE_VERSION "1.4.0rc2" +#define GVE_VERSION "1.4.0rc3" #define GVE_VERSION_PREFIX "GVE-" // Minimum amount of time between queue kicks in msec (10 seconds) From 48b4502e823508094b71a3de029d4695c28867c9 Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Tue, 12 Sep 2023 11:04:33 -0700 Subject: [PATCH 23/28] gve: Enable header-split without gve_close/gve_open --- google/gve/gve.h | 4 + google/gve/gve_adminq.c | 2 +- google/gve/gve_dqo.h | 2 + google/gve/gve_ethtool.c | 39 +++++-- google/gve/gve_main.c | 165 +++++++++++++++++++++------- google/gve/gve_rx_dqo.c | 232 ++++++++++++++++++++++++++++++--------- 6 files changed, 339 insertions(+), 105 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index 43e6a88..19cf23b 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -1170,6 +1170,10 @@ int gve_rx_poll(struct gve_notify_block *block, int budget); bool gve_rx_work_pending(struct 
gve_rx_ring *rx); int gve_rx_alloc_rings(struct gve_priv *priv); void gve_rx_free_rings_gqi(struct gve_priv *priv); +int gve_recreate_rx_rings(struct gve_priv *priv); +int gve_reconfigure_rx_rings(struct gve_priv *priv, + bool enable_hdr_split, + int packet_buffer_size); /* Reset */ void gve_schedule_reset(struct gve_priv *priv); int gve_reset(struct gve_priv *priv, bool attempt_teardown); diff --git a/google/gve/gve_adminq.c b/google/gve/gve_adminq.c index 769a936..310f267 100644 --- a/google/gve/gve_adminq.c +++ b/google/gve/gve_adminq.c @@ -695,7 +695,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cpu_to_be16(rx_buff_ring_entries); cmd.create_rx_queue.enable_rsc = !!(priv->dev->features & NETIF_F_LRO); - if (gve_get_enable_header_split(priv)) + if (rx->dqo.hdr_bufs) cmd.create_rx_queue.header_buffer_size = cpu_to_be16(priv->header_buf_size); } diff --git a/google/gve/gve_dqo.h b/google/gve/gve_dqo.h index 9579162..89fa1f0 100644 --- a/google/gve/gve_dqo.h +++ b/google/gve/gve_dqo.h @@ -40,10 +40,12 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv); void gve_tx_free_rings_dqo(struct gve_priv *priv); int gve_rx_alloc_rings_dqo(struct gve_priv *priv); void gve_rx_free_rings_dqo(struct gve_priv *priv); +void gve_rx_reset_rings_dqo(struct gve_priv *priv); int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, struct napi_struct *napi); void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx); void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx); +int gve_rx_handle_hdr_resources_dqo(struct gve_priv *priv, bool enable_hdr_split); static inline void gve_tx_put_doorbell_dqo(const struct gve_priv *priv, diff --git a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index 5db6e81..95f933c 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -563,8 +563,8 @@ static u32 gve_get_priv_flags(struct net_device *netdev) static int gve_set_priv_flags(struct net_device *netdev, u32 flags) { struct gve_priv *priv = netdev_priv(netdev); - bool need_adjust_queues = false; - u64 ori_flags, flag_diff; + u64 ori_flags, new_flags, flag_diff; + int new_packet_buffer_size; int num_tx_queues; /* If turning off header split, strict header split will be turned off too*/ @@ -596,7 +596,31 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags) num_tx_queues = gve_num_tx_queues(priv); ori_flags = READ_ONCE(priv->ethtool_flags); - priv->ethtool_flags = flags & GVE_PRIV_FLAGS_MASK; + new_flags = flags & GVE_PRIV_FLAGS_MASK; + + flag_diff = new_flags ^ ori_flags; + + if ((flag_diff & BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT)) || + (flag_diff & BIT(GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE))) { + bool enable_hdr_split = + new_flags & BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT); + bool enable_max_buffer_size = + new_flags & BIT(GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE); + int err; + + if (enable_max_buffer_size) + new_packet_buffer_size = priv->dev_max_rx_buffer_size; + else + new_packet_buffer_size = GVE_RX_BUFFER_SIZE_DQO; + + err = gve_reconfigure_rx_rings(priv, + enable_hdr_split, + new_packet_buffer_size); + if (err) + return err; + } + + priv->ethtool_flags = new_flags; /* start report-stats timer when user turns report stats on. */ if (flags & BIT(0)) { @@ -620,15 +644,6 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags) (priv->ethtool_flags & BIT(GVE_PRIV_FLAGS_ENABLE_STRICT_HEADER_SPLIT)) ? 
true : false; - flag_diff = priv->ethtool_flags ^ ori_flags; - - if ((flag_diff & BIT(GVE_PRIV_FLAGS_ENABLE_HEADER_SPLIT)) || - (flag_diff & BIT(GVE_PRIV_FLAGS_ENABLE_MAX_RX_BUFFER_SIZE))) - need_adjust_queues = true; - - if (need_adjust_queues) - return gve_adjust_queues(priv, priv->rx_cfg, priv->tx_cfg); - return 0; } diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 51dbaad..f15683f 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -786,24 +786,11 @@ static int gve_create_xdp_rings(struct gve_priv *priv) return 0; } -static int gve_create_rings(struct gve_priv *priv) +static int gve_create_rx_rings(struct gve_priv *priv) { - int num_tx_queues = gve_num_tx_queues(priv); int err; int i; - err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues); - if (err) { - netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", - num_tx_queues); - /* This failure will trigger a reset - no need to clean - * up - */ - return err; - } - netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", - num_tx_queues); - err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); if (err) { netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", @@ -852,6 +839,39 @@ static void add_napi_init_xdp_sync_stats(struct gve_priv *priv, } } +static int gve_create_tx_rings(struct gve_priv *priv, int start_id, u32 num_tx_queues) +{ + int err; + + err = gve_adminq_create_tx_queues(priv, start_id, num_tx_queues); + if (err) { + netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", + num_tx_queues); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", + num_tx_queues); + + return 0; +} + +static int gve_create_rings(struct gve_priv *priv) +{ + int num_tx_queues = gve_num_tx_queues(priv); + int err; + + err = gve_create_tx_rings(priv, 0, num_tx_queues); + if (err) + return err; + + err = gve_create_rx_rings(priv); + + return err; +} + static void add_napi_init_sync_stats(struct gve_priv *priv, int (*napi_poll)(struct napi_struct *napi, int budget)) @@ -952,32 +972,27 @@ static int gve_alloc_rings(struct gve_priv *priv) return err; } -static int gve_destroy_xdp_rings(struct gve_priv *priv) +static int gve_destroy_rx_rings(struct gve_priv *priv) { - int start_id; int err; - start_id = gve_xdp_tx_start_queue_id(priv); - err = gve_adminq_destroy_tx_queues(priv, - start_id, - priv->num_xdp_queues); + err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); if (err) { netif_err(priv, drv, priv->dev, - "failed to destroy XDP queues\n"); + "failed to destroy rx queues\n"); /* This failure will trigger a reset - no need to clean up */ return err; } - netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n"); + netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); return 0; } -static int gve_destroy_rings(struct gve_priv *priv) +static int gve_destroy_tx_rings(struct gve_priv *priv, int start_id, u32 num_queues) { - int num_tx_queues = gve_num_tx_queues(priv); int err; - err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues); + err = gve_adminq_destroy_tx_queues(priv, start_id, num_queues); if (err) { netif_err(priv, drv, priv->dev, "failed to destroy tx queues\n"); @@ -985,14 +1000,36 @@ static int gve_destroy_rings(struct gve_priv *priv) return err; } netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); - err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); - if (err) { - netif_err(priv, drv, priv->dev, - "failed to destroy rx 
queues\n"); - /* This failure will trigger a reset - no need to clean up */ + + return 0; +} + +static int gve_destroy_xdp_rings(struct gve_priv *priv) +{ + int start_id; + int err; + + start_id = gve_xdp_tx_start_queue_id(priv); + err = gve_destroy_tx_rings(priv, + start_id, + priv->num_xdp_queues); + + return err; +} + +static int gve_destroy_rings(struct gve_priv *priv) +{ + int num_tx_queues = gve_num_tx_queues(priv); + int err; + + err = gve_destroy_tx_rings(priv, 0, num_tx_queues); + if (err) return err; - } - netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); + + err = gve_destroy_rx_rings(priv); + if (err) + return err; + return 0; } @@ -1531,12 +1568,64 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status) } } +static void gve_turnup_and_check_status(struct gve_priv *priv) +{ + u32 status; + + gve_turnup(priv); + status = ioread32be(&priv->reg_bar0->device_status); + gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); +} + +int gve_recreate_rx_rings(struct gve_priv *priv) +{ + int err; + + /* Unregister queues with the device*/ + err = gve_destroy_rx_rings(priv); + if (err) + return err; + + /* Reset the RX state */ + gve_rx_reset_rings_dqo(priv); + + /* Register queues with the device */ + return gve_create_rx_rings(priv); +} + +int gve_reconfigure_rx_rings(struct gve_priv *priv, + bool enable_hdr_split, + int packet_buffer_size) +{ + int err = 0; + + if (priv->queue_format != GVE_DQO_RDA_FORMAT) + return -EOPNOTSUPP; + + gve_turndown(priv); + + /* Allocate/free hdr resources */ + if (enable_hdr_split != !!priv->header_buf_pool) { + err = gve_rx_handle_hdr_resources_dqo(priv, enable_hdr_split); + if (err) + goto err; + } + + /* Apply new RX configuration changes */ + priv->data_buffer_size_dqo = packet_buffer_size; + + /* Reset RX state and re-register with the device */ + err = gve_recreate_rx_rings(priv); +err: + gve_turnup_and_check_status(priv); + return err; +} + static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct bpf_prog *old_prog; int err = 0; - u32 status; old_prog = READ_ONCE(priv->xdp_prog); if (!netif_carrier_ok(priv->dev)) { @@ -1565,9 +1654,7 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, bpf_prog_put(old_prog); out: - gve_turnup(priv); - status = ioread32be(&priv->reg_bar0->device_status); - gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); + gve_turnup_and_check_status(priv); return err; } @@ -1788,12 +1875,6 @@ static int gve_adjust_queue_count(struct gve_priv *priv, priv->rx_cfg = new_rx_config; priv->tx_cfg = new_tx_config; - if (gve_get_enable_max_rx_buffer_size(priv)) - priv->data_buffer_size_dqo = GVE_MAX_RX_BUFFER_SIZE; - else - priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; - - if (old_rx_config.num_queues != new_rx_config.num_queues) { err = gve_flow_rules_reset(priv); if (err) diff --git a/google/gve/gve_rx_dqo.c b/google/gve/gve_rx_dqo.c index 62f3158..3669e9c 100644 --- a/google/gve/gve_rx_dqo.c +++ b/google/gve/gve_rx_dqo.c @@ -206,6 +206,23 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx, return 0; } +static void gve_rx_free_hdr_bufs(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + int buffer_queue_slots = rx->dqo.bufq.mask + 1; + int i; + + if (rx->dqo.hdr_bufs) { + for (i = 0; i < buffer_queue_slots; i++) + if (rx->dqo.hdr_bufs[i].data) + dma_pool_free(priv->header_buf_pool, + rx->dqo.hdr_bufs[i].data, + rx->dqo.hdr_bufs[i].addr); + 
kvfree(rx->dqo.hdr_bufs); + rx->dqo.hdr_bufs = NULL; + } +} + static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) { struct gve_rx_ring *rx = &priv->rx[idx]; @@ -255,25 +272,112 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) kvfree(rx->dqo.buf_states); rx->dqo.buf_states = NULL; - if (rx->dqo.hdr_bufs) { - for (i = 0; i < buffer_queue_slots; i++) - if (rx->dqo.hdr_bufs[i].data) - dma_pool_free(priv->header_buf_pool, - rx->dqo.hdr_bufs[i].data, - rx->dqo.hdr_bufs[i].addr); - kvfree(rx->dqo.hdr_bufs); - rx->dqo.hdr_bufs = NULL; - } + gve_rx_free_hdr_bufs(priv, idx); netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); } +static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + int buffer_queue_slots = rx->dqo.bufq.mask + 1; + int i; + + rx->dqo.hdr_bufs = kvcalloc(buffer_queue_slots, + sizeof(rx->dqo.hdr_bufs[0]), + GFP_KERNEL); + if (!rx->dqo.hdr_bufs) + return -ENOMEM; + + for (i = 0; i < buffer_queue_slots; i++) { + rx->dqo.hdr_bufs[i].data = + dma_pool_alloc(priv->header_buf_pool, + GFP_KERNEL, + &rx->dqo.hdr_bufs[i].addr); + if (!rx->dqo.hdr_bufs[i].data) + goto err; + } + + return 0; +err: + gve_rx_free_hdr_bufs(priv, idx); + return -ENOMEM; +} + +static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx, + const u32 buffer_queue_slots, + const u32 completion_queue_slots) +{ + int i; + + /* Set buffer queue state */ + rx->dqo.bufq.mask = buffer_queue_slots - 1; + rx->dqo.bufq.head = 0; + rx->dqo.bufq.tail = 0; + + /* Set completion queue state */ + rx->dqo.complq.num_free_slots = completion_queue_slots; + rx->dqo.complq.mask = completion_queue_slots - 1; + rx->dqo.complq.cur_gen_bit = 0; + rx->dqo.complq.head = 0; + + /* Set RX SKB context */ + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; + + /* Set up linked list of buffer IDs */ + for (i = 0; i < rx->dqo.num_buf_states - 1; i++) + rx->dqo.buf_states[i].next = i + 1; + rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; + + rx->dqo.free_buf_states = 0; + rx->dqo.recycled_buf_states.head = -1; + rx->dqo.recycled_buf_states.tail = -1; + rx->dqo.used_buf_states.head = -1; + rx->dqo.used_buf_states.tail = -1; +} + +static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + size_t size; + int i; + + const u32 buffer_queue_slots = priv->rx_desc_cnt; + const u32 completion_queue_slots = priv->rx_desc_cnt; + + netif_dbg(priv, drv, priv->dev, "Resetting rx ring \n"); + + /* Reset buffer queue */ + size = sizeof(rx->dqo.bufq.desc_ring[0]) * + buffer_queue_slots; + memset(rx->dqo.bufq.desc_ring, 0 , size); + + /* Reset completion queue */ + size = sizeof(rx->dqo.complq.desc_ring[0]) * + completion_queue_slots; + memset(rx->dqo.complq.desc_ring, 0, size); + + /* Reset q_resources */ + memset(rx->q_resources, 0, sizeof(*rx->q_resources)); + + /* Reset buf states */ + for (i = 0; i < rx->dqo.num_buf_states; i++) { + struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; + + if (bs->page_info.page) + gve_free_page_dqo(priv, bs, !rx->dqo.qpl); + } + + gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, + completion_queue_slots); +} + static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) { struct gve_rx_ring *rx = &priv->rx[idx]; struct device *hdev = &priv->pdev->dev; size_t size; - int i; const u32 buffer_queue_slots = priv->queue_format == GVE_DQO_RDA_FORMAT ? 
@@ -285,12 +389,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) memset(rx, 0, sizeof(*rx)); rx->gve = priv; rx->q_num = idx; - rx->dqo.bufq.mask = buffer_queue_slots - 1; - rx->dqo.complq.num_free_slots = completion_queue_slots; - rx->dqo.complq.mask = completion_queue_slots - 1; - rx->ctx.skb_head = NULL; - rx->ctx.skb_tail = NULL; - + + /* Allocate buf states */ rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ? min_t(s16, S16_MAX, buffer_queue_slots * 4) : priv->rx_pages_per_qpl; @@ -301,33 +401,6 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) if (!rx->dqo.buf_states) return -ENOMEM; - /* Allocate header buffers for header-split */ - if (priv->header_buf_pool) { - rx->dqo.hdr_bufs = kvcalloc(buffer_queue_slots, - sizeof(rx->dqo.hdr_bufs[0]), - GFP_KERNEL); - if (!rx->dqo.hdr_bufs) - goto err; - for (i = 0; i < buffer_queue_slots; i++) { - rx->dqo.hdr_bufs[i].data = - dma_pool_alloc(priv->header_buf_pool, - GFP_KERNEL, - &rx->dqo.hdr_bufs[i].addr); - if (!rx->dqo.hdr_bufs[i].data) - goto err; - } - } - - /* Set up linked list of buffer IDs */ - for (i = 0; i < rx->dqo.num_buf_states - 1; i++) - rx->dqo.buf_states[i].next = i + 1; - - rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; - rx->dqo.recycled_buf_states.head = -1; - rx->dqo.recycled_buf_states.tail = -1; - rx->dqo.used_buf_states.head = -1; - rx->dqo.used_buf_states.tail = -1; - /* Allocate RX completion queue */ size = sizeof(rx->dqo.complq.desc_ring[0]) * completion_queue_slots; @@ -355,6 +428,14 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) if (!rx->q_resources) goto err; + gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, + completion_queue_slots); + + /* Allocate header buffers for header-split */ + if (priv->header_buf_pool) + if (gve_rx_alloc_hdr_bufs(priv, idx)) + goto err; + gve_rx_add_to_block(priv, idx); return 0; @@ -372,20 +453,27 @@ void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx) iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); } +static int gve_rx_alloc_hdr_buf_pool(struct gve_priv *priv) +{ + priv->header_buf_pool = dma_pool_create("header_bufs", + &priv->pdev->dev, + priv->header_buf_size, + 64, 0); + if (!priv->header_buf_pool) + return -ENOMEM; + + return 0; +} + int gve_rx_alloc_rings_dqo(struct gve_priv *priv) { int err = 0; int i = 0; if (gve_get_enable_header_split(priv)) { - priv->header_buf_pool = dma_pool_create("header_bufs", - &priv->pdev->dev, - priv->header_buf_size, - 64, 0); - if (!priv->header_buf_pool) { - err = -ENOMEM; + err = gve_rx_alloc_hdr_buf_pool(priv); + if (err) goto err; - } } for (i = 0; i < priv->rx_cfg.num_queues; i++) { @@ -407,6 +495,14 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv) return err; } +void gve_rx_reset_rings_dqo(struct gve_priv *priv) +{ + int i; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) + gve_rx_reset_ring_dqo(priv, i); +} + void gve_rx_free_rings_dqo(struct gve_priv *priv) { int i; @@ -941,3 +1037,39 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) return work_done; } + +int gve_rx_handle_hdr_resources_dqo(struct gve_priv *priv, + bool enable_hdr_split) +{ + int err = 0; + int i; + + if (enable_hdr_split) { + err = gve_rx_alloc_hdr_buf_pool(priv); + if (err) + goto err; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + err = gve_rx_alloc_hdr_bufs(priv, i); + if (err) + goto free_buf_pool; + } + } else { + for (i = 0; i < priv->rx_cfg.num_queues; i++) + gve_rx_free_hdr_bufs(priv, i); + + 
dma_pool_destroy(priv->header_buf_pool); + priv->header_buf_pool = NULL; + } + + return 0; + +free_buf_pool: + for (i--; i >= 0; i--) + gve_rx_free_hdr_bufs(priv, i); + + dma_pool_destroy(priv->header_buf_pool); + priv->header_buf_pool = NULL; +err: + return err; +} From c4a2d49f711edc65bdc76798717440d9e32a054c Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Fri, 28 Jul 2023 15:32:58 -0700 Subject: [PATCH 24/28] gve: Add modify ring size support --- google/gve/gve.h | 25 ++++++--- google/gve/gve_adminq.c | 94 +++++++++++++++++++------------- google/gve/gve_adminq.h | 16 +++++- google/gve/gve_ethtool.c | 74 ++++++++++++++++++++++++- google/gve/gve_main.c | 42 ++++++++++++-- google/gve/gve_rx.c | 10 +--- google/gve/gve_rx_dqo.c | 4 +- google/gve/gve_tx_dqo.c | 4 +- patches/ethtool_ringparams.cocci | 64 ++++++++-------------- 9 files changed, 223 insertions(+), 110 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index 19cf23b..2f4edc6 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -40,6 +40,18 @@ #define NIC_TX_STATS_REPORT_NUM 0 #define NIC_RX_STATS_REPORT_NUM 4 +/* Experiment derived */ +#define GVE_TX_PAGE_COUNT 64 + +/* Minimum descriptor ring size in bytes */ +#define GVE_RING_SIZE_MIN 4096 + +/* Sanity check min ring element length for tx and rx queues */ +#define GVE_RING_LENGTH_LIMIT_MIN 64 + +/* Sanity check max ring element length for tx and rx queues */ +#define GVE_RING_LENGTH_LIMIT_MAX 2048 + #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) // TX timeout period to check the miss path @@ -630,11 +642,6 @@ struct gve_qpl_config { unsigned long *qpl_id_map; /* bitmap of used qpl ids */ }; -struct gve_options_dqo_rda { - u16 tx_comp_ring_entries; /* number of tx_comp descriptors */ - u16 rx_buff_ring_entries; /* number of rx_buff descriptors */ -}; - struct gve_irq_db { __be32 index; } ____cacheline_aligned; @@ -715,14 +722,16 @@ struct gve_priv { u16 num_event_counters; u16 tx_desc_cnt; /* num desc per ring */ u16 rx_desc_cnt; /* num desc per ring */ + u16 max_rx_desc_cnt; /* max num desc per rx ring */ + u16 max_tx_desc_cnt; /* max num desc per tx ring */ u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */ u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */ - u16 rx_data_slot_cnt; /* rx buffer length */ u64 max_registered_pages; u64 num_registered_pages; /* num pages registered with NIC */ struct bpf_prog *xdp_prog; /* XDP BPF program */ u32 rx_copybreak; /* copy packets smaller than this */ u16 default_num_queues; /* default num queues to set up */ + bool modify_ringsize_enabled; u16 num_xdp_queues; struct gve_queue_config tx_cfg; @@ -795,7 +804,6 @@ struct gve_priv { u64 link_speed; bool up_before_suspend; /* True if dev was up before suspend */ - struct gve_options_dqo_rda options_dqo_rda; struct gve_ptype_lut *ptype_lut_dqo; /* Must be a power of two. 
*/ @@ -1190,6 +1198,9 @@ int gve_rss_config_init(struct gve_priv *priv); void gve_rss_set_default_indir(struct gve_priv *priv); void gve_rss_config_release(struct gve_rss_config *rss_config); +int gve_adjust_ring_sizes(struct gve_priv *priv, + int new_tx_desc_cnt, + int new_rx_desc_cnt); /* exported by ethtool.c */ extern const struct ethtool_ops gve_ethtool_ops; /* needed by ethtool */ diff --git a/google/gve/gve_adminq.c b/google/gve/gve_adminq.c index 310f267..efa22b6 100644 --- a/google/gve/gve_adminq.c +++ b/google/gve/gve_adminq.c @@ -39,7 +39,8 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, - struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, + struct gve_device_option_modify_ring **dev_op_modify_ring, + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, struct gve_device_option_flow_steering **dev_op_flow_steering, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl) @@ -131,6 +132,23 @@ void gve_parse_device_option(struct gve_priv *priv, } *dev_op_dqo_qpl = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_MODIFY_RING: + if (option_length < sizeof(**dev_op_modify_ring) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Modify Ring", + (int)sizeof(**dev_op_modify_ring), + GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_modify_ring)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring"); + } + *dev_op_modify_ring = (void *)(option + 1); + break; case GVE_DEV_OPT_ID_JUMBO_FRAMES: if (option_length < sizeof(**dev_op_jumbo_frames) || req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) { @@ -203,6 +221,7 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, + struct gve_device_option_modify_ring **dev_op_modify_ring, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, struct gve_device_option_flow_steering **dev_op_flow_steering, @@ -226,9 +245,9 @@ gve_process_device_options(struct gve_priv *priv, gve_parse_device_option(priv, descriptor, dev_opt, dev_op_gqi_rda, dev_op_gqi_qpl, - dev_op_dqo_rda, dev_op_jumbo_frames, - dev_op_buffer_sizes, dev_op_flow_steering, - dev_op_dqo_qpl); + dev_op_dqo_rda, dev_op_modify_ring, + dev_op_jumbo_frames, dev_op_buffer_sizes, + dev_op_flow_steering, dev_op_dqo_qpl); dev_opt = next_opt; } @@ -600,7 +619,9 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) .queue_resources_addr = cpu_to_be64(tx->q_resources_bus), .tx_ring_addr = cpu_to_be64(tx->bus), + .tx_comp_ring_addr = cpu_to_be64(tx->complq_bus_dqo), .ntfy_id = cpu_to_be32(tx->ntfy_id), + .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt), }; if (gve_is_gqi(priv)) { @@ -609,24 +630,18 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); } else { - u16 comp_ring_size; u32 qpl_id = 0; if (priv->queue_format == GVE_DQO_RDA_FORMAT) { qpl_id = GVE_RAW_ADDRESSING_QPL_ID; - comp_ring_size = - priv->options_dqo_rda.tx_comp_ring_entries; } else { qpl_id = 
tx->dqo.qpl->id; - comp_ring_size = priv->tx_desc_cnt; } cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); - cmd.create_tx_queue.tx_ring_size = - cpu_to_be16(priv->tx_desc_cnt); cmd.create_tx_queue.tx_comp_ring_addr = cpu_to_be64(tx->complq_bus_dqo); cmd.create_tx_queue.tx_comp_ring_size = - cpu_to_be16(comp_ring_size); + cpu_to_be16(priv->tx_desc_cnt); } return gve_adminq_issue_cmd(priv, &cmd); @@ -657,6 +672,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) .queue_id = cpu_to_be32(queue_index), .ntfy_id = cpu_to_be32(rx->ntfy_id), .queue_resources_addr = cpu_to_be64(rx->q_resources_bus), + .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt), }; if (gve_is_gqi(priv)) { @@ -671,20 +687,14 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); } else { - u16 rx_buff_ring_entries; u32 qpl_id = 0; if (priv->queue_format == GVE_DQO_RDA_FORMAT) { qpl_id = GVE_RAW_ADDRESSING_QPL_ID; - rx_buff_ring_entries = - priv->options_dqo_rda.rx_buff_ring_entries; } else { qpl_id = rx->dqo.qpl->id; - rx_buff_ring_entries = priv->rx_desc_cnt; } cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); - cmd.create_rx_queue.rx_ring_size = - cpu_to_be16(priv->rx_desc_cnt); cmd.create_rx_queue.rx_desc_ring_addr = cpu_to_be64(rx->dqo.complq.bus); cmd.create_rx_queue.rx_data_ring_addr = @@ -692,7 +702,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(priv->data_buffer_size_dqo); cmd.create_rx_queue.rx_buff_ring_size = - cpu_to_be16(rx_buff_ring_entries); + cpu_to_be16(priv->rx_desc_cnt); cmd.create_rx_queue.enable_rsc = !!(priv->dev->features & NETIF_F_LRO); if (rx->dqo.hdr_bufs) @@ -785,14 +795,14 @@ static int gve_set_desc_cnt(struct gve_priv *priv, struct gve_device_descriptor *descriptor) { priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); - if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) { + if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < GVE_RING_SIZE_MIN) { dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt); return -EINVAL; } priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0]) - < PAGE_SIZE) { + < GVE_RING_SIZE_MIN) { dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt); return -EINVAL; @@ -803,33 +813,39 @@ static int gve_set_desc_cnt(struct gve_priv *priv, static int gve_set_desc_cnt_dqo(struct gve_priv *priv, const struct gve_device_descriptor *descriptor, - const struct gve_device_option_dqo_rda *dev_op_dqo_rda) + const struct gve_device_option_dqo_rda *dev_op_dqo_rda) { priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); - if (priv->queue_format == GVE_DQO_QPL_FORMAT) - return 0; - - priv->options_dqo_rda.tx_comp_ring_entries = - be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries); - priv->options_dqo_rda.rx_buff_ring_entries = - be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries); - return 0; } static void gve_enable_supported_features( struct gve_priv *priv, u32 supported_features_mask, + const struct gve_device_option_modify_ring *dev_op_modify_ring, const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames, const struct gve_device_option_buffer_sizes *dev_op_buffer_sizes, const struct 
gve_device_option_flow_steering *dev_op_flow_steering, const struct gve_device_option_dqo_qpl *dev_op_dqo_qpl) { int buf_size; + if (dev_op_modify_ring && + (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) { + priv->modify_ringsize_enabled = true; + dev_info(&priv->pdev->dev, "MODIFY RING device option enabled.\n"); + priv->max_rx_desc_cnt = min_t( + int, + be16_to_cpu(dev_op_modify_ring->max_rx_ring_size), + GVE_RING_LENGTH_LIMIT_MAX); + priv->max_tx_desc_cnt = min_t( + int, + be16_to_cpu(dev_op_modify_ring->max_tx_ring_size), + GVE_RING_LENGTH_LIMIT_MAX); + } - /* Before control reaches this point, the page-size-capped max MTU in + /* Before control reaches this point, the page-size-capped max MTU from * the gve_device_descriptor field has already been stored in * priv->dev->max_mtu. We overwrite it with the true max MTU below. */ @@ -895,6 +911,7 @@ static void gve_enable_supported_features( int gve_adminq_describe_device(struct gve_priv *priv) { + struct gve_device_option_modify_ring *dev_op_modify_ring = NULL; struct gve_device_option_flow_steering *dev_op_flow_steering = NULL; struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; @@ -928,6 +945,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda, &dev_op_gqi_qpl, &dev_op_dqo_rda, + &dev_op_modify_ring, &dev_op_jumbo_frames, &dev_op_buffer_sizes, &dev_op_flow_steering, @@ -977,6 +995,11 @@ int gve_adminq_describe_device(struct gve_priv *priv) if (err) goto free_device_descriptor; + /* Default max to current in case modify ring size option is disabled */ + // hardcode to 2048 for now; should remove after device changes rollout to prod + priv->max_rx_desc_cnt = 2048; + priv->max_tx_desc_cnt = 2048; + priv->max_registered_pages = be64_to_cpu(descriptor->max_registered_pages); mtu = be16_to_cpu(descriptor->mtu); @@ -990,18 +1013,15 @@ int gve_adminq_describe_device(struct gve_priv *priv) eth_hw_addr_set(priv->dev, descriptor->mac); mac = descriptor->mac; dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac); + priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl); - priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl); + priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl); - if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) { - dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n", - priv->rx_data_slot_cnt); - priv->rx_desc_cnt = priv->rx_data_slot_cnt; - } priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); gve_enable_supported_features(priv, supported_features_mask, - dev_op_jumbo_frames, + dev_op_modify_ring, + dev_op_jumbo_frames, dev_op_buffer_sizes, dev_op_flow_steering, dev_op_dqo_qpl); diff --git a/google/gve/gve_adminq.h b/google/gve/gve_adminq.h index 2c69410..a5eaff1 100644 --- a/google/gve/gve_adminq.h +++ b/google/gve/gve_adminq.h @@ -82,7 +82,7 @@ struct gve_device_descriptor { u8 mac[ETH_ALEN]; __be16 num_device_options; __be16 total_length; - u8 reserved2[6]; + u8 reserved3[6]; }; static_assert(sizeof(struct gve_device_descriptor) == 40); @@ -109,8 +109,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4); struct gve_device_option_dqo_rda { __be32 supported_features_mask; - __be16 tx_comp_ring_entries; - __be16 rx_buff_ring_entries; + __be32 reserved; }; static_assert(sizeof(struct gve_device_option_dqo_rda) == 8); @@ 
-123,6 +122,14 @@ struct gve_device_option_dqo_qpl { static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8); +struct gve_device_option_modify_ring { + __be32 supported_features_mask; + __be16 max_rx_ring_size; + __be16 max_tx_ring_size; +}; + +static_assert(sizeof(struct gve_device_option_modify_ring) == 8); + struct gve_device_option_jumbo_frames { __be32 supported_features_mask; __be16 max_mtu; @@ -160,6 +167,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_GQI_RDA = 0x2, GVE_DEV_OPT_ID_GQI_QPL = 0x3, GVE_DEV_OPT_ID_DQO_RDA = 0x4, + GVE_DEV_OPT_ID_MODIFY_RING = 0x6, GVE_DEV_OPT_ID_DQO_QPL = 0x7, GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, @@ -171,6 +179,7 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, @@ -178,6 +187,7 @@ enum gve_dev_opt_req_feat_mask { }; enum gve_sup_feature_mask { + GVE_SUP_MODIFY_RING_MASK = 1 << 0, GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, GVE_SUP_FLOW_STEERING_MASK = 1 << 5, diff --git a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index 95f933c..92ef6a2 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -497,13 +497,80 @@ static void gve_get_ringparam(struct net_device *netdev, struct netlink_ext_ack *extack) { struct gve_priv *priv = netdev_priv(netdev); - - cmd->rx_max_pending = priv->rx_desc_cnt; - cmd->tx_max_pending = priv->tx_desc_cnt; + cmd->rx_max_pending = priv->max_rx_desc_cnt; + cmd->tx_max_pending = priv->max_tx_desc_cnt; cmd->rx_pending = priv->rx_desc_cnt; cmd->tx_pending = priv->tx_desc_cnt; } +static int gve_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *cmd, + struct kernel_ethtool_ringparam *kernel_cmd, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(netdev); + int old_rx_desc_cnt = priv->rx_desc_cnt; + int old_tx_desc_cnt = priv->tx_desc_cnt; + int new_tx_desc_cnt = cmd->tx_pending; + int new_rx_desc_cnt = cmd->rx_pending; + int new_max_registered_pages = + new_rx_desc_cnt * gve_num_rx_qpls(priv) + + GVE_TX_PAGE_COUNT * gve_num_tx_qpls(priv); + + if (new_tx_desc_cnt < GVE_RING_LENGTH_LIMIT_MIN || + new_rx_desc_cnt < GVE_RING_LENGTH_LIMIT_MIN) { + dev_err(&priv->pdev->dev, "Ring size cannot be less than %d\n", + GVE_RING_LENGTH_LIMIT_MIN); + return -EINVAL; + } + + if (new_tx_desc_cnt > GVE_RING_LENGTH_LIMIT_MAX || + new_rx_desc_cnt > GVE_RING_LENGTH_LIMIT_MAX) { + dev_err(&priv->pdev->dev, + "Ring size cannot be greater than %d\n", + GVE_RING_LENGTH_LIMIT_MAX); + return -EINVAL; + } + + /* Ring size must be a power of 2, will fail if passed values are not + * In the future we may want to update to round down to the + * closest valid ring size + */ + if ((new_tx_desc_cnt & (new_tx_desc_cnt - 1)) != 0 || + (new_rx_desc_cnt & (new_rx_desc_cnt - 1)) != 0) { + dev_err(&priv->pdev->dev, "Ring size must be a power of 2\n"); + return -EINVAL; + } + + if (new_tx_desc_cnt > priv->max_tx_desc_cnt) { + dev_err(&priv->pdev->dev, + "Tx ring size passed %d is larger than max tx ring size %u\n", + new_tx_desc_cnt, priv->max_tx_desc_cnt); + return -EINVAL; + } + + if (new_rx_desc_cnt > priv->max_rx_desc_cnt) { + dev_err(&priv->pdev->dev, + "Rx ring size passed %d is larger than max rx ring size %u\n", + new_rx_desc_cnt, priv->max_rx_desc_cnt); + 
return -EINVAL; + } + + if (new_max_registered_pages > priv->max_registered_pages) { + dev_err(&priv->pdev->dev, + "Allocating too many pages %d; max %llu", + new_max_registered_pages, + priv->max_registered_pages); + return -EINVAL; + } + + // Nothing to change return success + if (new_tx_desc_cnt == old_tx_desc_cnt && new_rx_desc_cnt == old_rx_desc_cnt) + return 0; + + return gve_adjust_ring_sizes(priv, new_tx_desc_cnt, new_rx_desc_cnt); +} + static int gve_user_reset(struct net_device *netdev, u32 *flags) { struct gve_priv *priv = netdev_priv(netdev); @@ -1352,6 +1419,7 @@ const struct ethtool_ops gve_ethtool_ops = { .get_coalesce = gve_get_coalesce, .set_coalesce = gve_set_coalesce, .get_ringparam = gve_get_ringparam, + .set_ringparam = gve_set_ringparam, .reset = gve_user_reset, .get_tunable = gve_get_tunable, .set_tunable = gve_set_tunable, diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index f15683f..9efc6e5 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -1179,8 +1179,7 @@ static int gve_alloc_xdp_qpls(struct gve_priv *priv) start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { - err = gve_alloc_queue_page_list(priv, i, - priv->tx_pages_per_qpl); + err = gve_alloc_queue_page_list(priv, i, GVE_TX_PAGE_COUNT); if (err) goto free_qpls; } @@ -1209,7 +1208,8 @@ static int gve_alloc_qpls(struct gve_priv *priv) return -ENOMEM; start_id = gve_tx_start_qpl_id(priv); - page_count = priv->tx_pages_per_qpl; + page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ? + GVE_TX_PAGE_COUNT : priv->tx_pages_per_qpl; for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { err = gve_alloc_queue_page_list(priv, i, page_count); if (err) @@ -1223,7 +1223,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) * more than descriptors (because of out of order completions). */ page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ? - priv->rx_data_slot_cnt : priv->rx_pages_per_qpl; + priv->rx_desc_cnt : priv->rx_pages_per_qpl; for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { err = gve_alloc_queue_page_list(priv, i, page_count); if (err) @@ -1495,6 +1495,38 @@ static int gve_close(struct net_device *dev) return gve_reset_recovery(priv, false); } +int gve_adjust_ring_sizes(struct gve_priv *priv, + int new_tx_desc_cnt, + int new_rx_desc_cnt) +{ + int err; + + if (netif_carrier_ok(priv->dev)) { + err = gve_close(priv->dev); + if (err) + return err; + priv->tx_desc_cnt = new_tx_desc_cnt; + priv->rx_desc_cnt = new_rx_desc_cnt; + + err = gve_open(priv->dev); + if (err) + goto err; + return 0; + } + + priv->tx_desc_cnt = new_tx_desc_cnt; + priv->rx_desc_cnt = new_rx_desc_cnt; + + return 0; + +err: + dev_err(&priv->pdev->dev, + "Failed to adjust ring sizes: err=%d. 
Disabling all queues.\n", + err); + gve_turndown(priv); + return err; +} + static int gve_remove_xdp_queues(struct gve_priv *priv) { int err; @@ -2240,6 +2272,8 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) goto setup_device; priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; + priv->modify_ringsize_enabled = false; + /* Get the initial information we need from the device */ err = gve_adminq_describe_device(priv); if (err) { diff --git a/google/gve/gve_rx.c b/google/gve/gve_rx.c index d1da741..e52c048 100644 --- a/google/gve/gve_rx.c +++ b/google/gve/gve_rx.c @@ -197,9 +197,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) { struct gve_rx_ring *rx = &priv->rx[idx]; struct device *hdev = &priv->pdev->dev; - u32 slots, npages; int filled_pages; size_t bytes; + u32 slots; int err; netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); @@ -209,7 +209,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) rx->gve = priv; rx->q_num = idx; - slots = priv->rx_data_slot_cnt; + slots = priv->rx_desc_cnt; rx->mask = slots - 1; rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; @@ -256,12 +256,6 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) /* alloc rx desc ring */ bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; - npages = bytes / PAGE_SIZE; - if (npages * PAGE_SIZE != bytes) { - err = -EIO; - goto abort_with_q_resources; - } - rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, GFP_KERNEL); if (!rx->desc.desc_ring) { diff --git a/google/gve/gve_rx_dqo.c b/google/gve/gve_rx_dqo.c index 3669e9c..b9a5a63 100644 --- a/google/gve/gve_rx_dqo.c +++ b/google/gve/gve_rx_dqo.c @@ -379,9 +379,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) struct device *hdev = &priv->pdev->dev; size_t size; - const u32 buffer_queue_slots = - priv->queue_format == GVE_DQO_RDA_FORMAT ? - priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt; + const u32 buffer_queue_slots = priv->rx_desc_cnt; const u32 completion_queue_slots = priv->rx_desc_cnt; netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); diff --git a/google/gve/gve_tx_dqo.c b/google/gve/gve_tx_dqo.c index d87de61..9c3bc6b 100644 --- a/google/gve/gve_tx_dqo.c +++ b/google/gve/gve_tx_dqo.c @@ -269,9 +269,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) /* Queue sizes must be a power of 2 */ tx->mask = priv->tx_desc_cnt - 1; - tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ? - priv->options_dqo_rda.tx_comp_ring_entries - 1 : - tx->mask; + tx->dqo.complq_mask = priv->tx_desc_cnt - 1; /* The max number of pending packets determines the maximum number of * descriptors which maybe written to the completion queue. 
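Editor's note: the validation in the new gve_set_ringparam() above boils down to a few arithmetic rules — each requested ring size must fall within the driver's fixed limits, must not exceed the maxima reported by the device, and must be a power of two (the classic `x & (x - 1)` test). Below is a minimal standalone sketch of just those checks, assuming an illustrative helper name and made-up limit values rather than the driver's actual GVE_RING_LENGTH_LIMIT_* constants; the registered-pages budget check is omitted for brevity.

```c
/* Standalone sketch of the ring-size checks done by gve_set_ringparam();
 * the helper name and the limit values here are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_LEN_MIN 64U    /* stand-in for GVE_RING_LENGTH_LIMIT_MIN */
#define RING_LEN_MAX 4096U  /* stand-in for GVE_RING_LENGTH_LIMIT_MAX */

static bool ring_size_is_valid(unsigned int cnt, unsigned int dev_max)
{
	if (cnt < RING_LEN_MIN || cnt > RING_LEN_MAX)
		return false;           /* outside the driver's limits */
	if (cnt & (cnt - 1))
		return false;           /* not a power of two */
	return cnt <= dev_max;          /* within the device-reported max */
}

int main(void)
{
	/* 1024 descriptors against a device max of 2048: accepted */
	printf("%d\n", ring_size_is_valid(1024, 2048));
	/* 1000 descriptors: rejected, not a power of two */
	printf("%d\n", ring_size_is_valid(1000, 2048));
	return 0;
}
```

As in the patch, a request that passes these checks but matches the current sizes is a no-op, and anything else goes through a close/reopen cycle via gve_adjust_ring_sizes().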
diff --git a/patches/ethtool_ringparams.cocci b/patches/ethtool_ringparams.cocci
index 1380006..13ab72c 100644
--- a/patches/ethtool_ringparams.cocci
+++ b/patches/ethtool_ringparams.cocci
@@ -1,49 +1,29 @@
-@ assigned @
-identifier get_ringparam_func, ethtool_ops_obj;
+@ gve_get_ringparam @
+identifier gve_get_ringparam, netdev, cmd;
 @@
-struct ethtool_ops ethtool_ops_obj = {
-	.get_ringparam = get_ringparam_func,
-};
-
-@ declared depends on assigned @
-identifier dev_param, cmd_param, kernel_cmd_param, extack_param;
-identifier assigned.get_ringparam_func;
-fresh identifier backport_get_ringparam = "backport_" ## get_ringparam_func;
-@@
-
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0))
-static void get_ringparam_func(struct net_device *dev_param,
-			struct ethtool_ringparam *cmd_param,
-			struct kernel_ethtool_ringparam *kernel_cmd_param,
-			struct netlink_ext_ack *extack_param)
+static void gve_get_ringparam(struct net_device *netdev,
+			struct ethtool_ringparam *cmd
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 7)
+			, struct kernel_ethtool_ringparam *kernel_cmd,
+			struct netlink_ext_ack *extack
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 7) */
+			)
 {
-	...
+...
 }
-+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0)) */
-+static void
-+backport_get_ringparam(struct net_device *dev_param,
-+		       struct ethtool_ringparam *cmd_param)
-+{
-+	struct gve_priv *priv = netdev_priv(netdev);
-+
-+	cmd->rx_max_pending = priv->rx_desc_cnt;
-+	cmd->tx_max_pending = priv->tx_desc_cnt;
-+	cmd->rx_pending = priv->rx_desc_cnt;
-+	cmd->tx_pending = priv->tx_desc_cnt;
-+}
-+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0)) */
-@ mod_assignment @
-identifier assigned.get_ringparam_func;
-identifier assigned.ethtool_ops_obj;
-fresh identifier backport_get_ringparam = "backport_" ## get_ringparam_func;
+@ gve_set_ringparam @
+identifier gve_set_ringparam, netdev, cmd;
 @@
-struct ethtool_ops ethtool_ops_obj = {
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0))
-	.get_ringparam = get_ringparam_func,
-+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0)) */
-+	.get_ringparam = backport_get_ringparam,
-+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0)) */
-};
+static int gve_set_ringparam(struct net_device *netdev,
+			struct ethtool_ringparam *cmd
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 7)
+			, struct kernel_ethtool_ringparam *kernel_cmd,
+			struct netlink_ext_ack *extack
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 7) */
+			)
+{
+...
+}

From 948a123e2fcb5104d6903c8e7970fc0233cc35db Mon Sep 17 00:00:00 2001
From: Jeroen de Borst 
Date: Sat, 14 Oct 2023 09:50:02 -0700
Subject: [PATCH 25/28] Update README.md

Updated the outdated support statement.

---
 README.md | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 8dc6661..c02a284 100644
--- a/README.md
+++ b/README.md
@@ -24,7 +24,26 @@ Device Class | `0x200` | Ethernet
 
 # Supported Kernels
 
-4.16, 4.14, 4.9, 4.6, 4.4, 4.2, 3.19, 3.16, 3.13, 3.10
+This driver is supported on any of the [distros listed as supporting gVNIC](https://cloud.google.com/compute/docs/images/os-details#networking).
+Those distros have native drivers for gVNIC, but this driver can be used to
+replace the native driver to get the latest enhancements. 
Note that native
+drivers are likely to report version 1.0.0; this should be ignored. The
+upstream community has deprecated the use of driver versions, so the version
+has not been updated since the initial upstream release.
+
+This driver is also supported on [clean Linux LTS kernels that are not EOL](https://www.kernel.org/category/releases.html).
+Linux kernels starting with 5.4 have the driver built in, but this driver can be
+installed on any LTS kernel to get the latest enhancements.
+
+Debian 9 and 10 are supported since they use clean Linux LTS kernels
+(4.9 and 4.19 respectively).
+
+Versions that are not marked as a release candidate (rc) correspond to upstream
+versions of the driver. It is our intention that release candidates
+will be upstreamed in the near future, but when and in what form this happens
+depends on the Linux community and the upstream review process. We can't
+guarantee that a release candidate will land upstream as-is or that it
+will be accepted upstream at all.
 
 # Installation

From 1b4fe3f70e982b49507bc6fad865c23c9d22cc30 Mon Sep 17 00:00:00 2001
From: Praveen Kaligineedi 
Date: Tue, 24 Oct 2023 16:03:59 -0700
Subject: [PATCH 26/28] Bump version to 1.4.0rc4

---
 google/gve/gve_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c
index 9efc6e5..4dd70c9 100644
--- a/google/gve/gve_main.c
+++ b/google/gve/gve_main.c
@@ -26,7 +26,7 @@
 #define GVE_DEFAULT_RX_COPYBREAK	(256)
 
 #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
-#define GVE_VERSION		"1.4.0rc3"
+#define GVE_VERSION		"1.4.0rc4"
 #define GVE_VERSION_PREFIX	"GVE-"
 
 // Minimum amount of time between queue kicks in msec (10 seconds)

From 827275e783d37f554813371c2d54dfcc59606024 Mon Sep 17 00:00:00 2001
From: Kevin Krakauer 
Date: Tue, 19 Dec 2023 12:48:34 -0800
Subject: [PATCH 27/28] docs: Add module installation help

Based on some issues I ran into when following the instructions.

---
 README.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/README.md b/README.md
index c02a284..abd41f8 100644
--- a/README.md
+++ b/README.md
@@ -85,6 +85,17 @@ depmod
 modprobe gve
 ```
 
+Check via `ethtool -i devname` that the new driver is in use. If it is not, you
+can load it manually.
+
+> [!WARNING]
+> Run this as a single line, as running `rmmod` alone will remove the existing
+> driver and disconnect you if connected over SSH.
+
+```bash
+sudo rmmod gve; sudo insmod ./build/gve.ko
+```
+
 # Configuration
 
 ## Ethtool
@@ -108,6 +119,12 @@ ethtool --set-channels devname [rx N] [tx N] [combined N]
 combined:	attempts to set both rx and tx queues to N
 rx:		attempts to set rx queues to N
 tx:		attempts to set tx queues to N
+## XDP
+
+To attach an XDP program to the driver, the number of RX and TX queues must be
+no more than half their maximum values. The maximum values are based on the
+number of CPUs available. 
+ ### Manual Configuration To manually configure gVNIC, you'll need to complete the following steps: From d1884db5bd12405f9177708cd46c9862af766e12 Mon Sep 17 00:00:00 2001 From: Harshitha Ramamurthy Date: Wed, 10 Jan 2024 00:33:31 +0000 Subject: [PATCH 28/28] backport: gve: unify driver name usage Backport upstream commit 9d0aba98316d ("gve: unify driver name usage") https://git.kernel.org/netdev/net/c/9d0aba98316d Signed-off-by: Junfeng Guo Signed-off-by: Harshitha Ramamurthy --- google/gve/gve.h | 1 + google/gve/gve_ethtool.c | 2 +- google/gve/gve_main.c | 11 ++++++----- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/google/gve/gve.h b/google/gve/gve.h index 2f4edc6..9c767a0 100644 --- a/google/gve/gve.h +++ b/google/gve/gve.h @@ -1203,6 +1203,7 @@ int gve_adjust_ring_sizes(struct gve_priv *priv, int new_rx_desc_cnt); /* exported by ethtool.c */ extern const struct ethtool_ops gve_ethtool_ops; +extern char gve_driver_name[]; /* needed by ethtool */ extern const char gve_version_str[]; #endif /* _GVE_H_ */ diff --git a/google/gve/gve_ethtool.c b/google/gve/gve_ethtool.c index 92ef6a2..42af5f4 100644 --- a/google/gve/gve_ethtool.c +++ b/google/gve/gve_ethtool.c @@ -15,7 +15,7 @@ static void gve_get_drvinfo(struct net_device *netdev, { struct gve_priv *priv = netdev_priv(netdev); - strscpy(info->driver, "gve", sizeof(info->driver)); + strscpy(info->driver, gve_driver_name, sizeof(info->driver)); strscpy(info->version, gve_version_str, sizeof(info->version)); strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info)); } diff --git a/google/gve/gve_main.c b/google/gve/gve_main.c index 4dd70c9..7d04be6 100644 --- a/google/gve/gve_main.c +++ b/google/gve/gve_main.c @@ -32,6 +32,7 @@ // Minimum amount of time between queue kicks in msec (10 seconds) #define MIN_TX_TIMEOUT_GAP (1000 * 10) +char gve_driver_name[] = "gve"; const char gve_version_str[] = GVE_VERSION; static const char gve_version_prefix[] = GVE_VERSION_PREFIX; @@ -2450,7 +2451,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - err = pci_request_regions(pdev, "gvnic-cfg"); + err = pci_request_regions(pdev, gve_driver_name); if (err) goto abort_with_enabled; @@ -2691,8 +2692,8 @@ static const struct pci_device_id gve_id_table[] = { { } }; -static struct pci_driver gvnic_driver = { - .name = "gvnic", +static struct pci_driver gve_driver = { + .name = gve_driver_name, .id_table = gve_id_table, .probe = gve_probe, .remove = gve_remove, @@ -2703,10 +2704,10 @@ static struct pci_driver gvnic_driver = { #endif }; -module_pci_driver(gvnic_driver); +module_pci_driver(gve_driver); MODULE_DEVICE_TABLE(pci, gve_id_table); MODULE_AUTHOR("Google, Inc."); -MODULE_DESCRIPTION("gVNIC Driver"); +MODULE_DESCRIPTION("Google Virtual NIC Driver"); MODULE_LICENSE("Dual MIT/GPL"); MODULE_VERSION(GVE_VERSION);
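Editor's note: as a quick end-to-end check of the driver-name unification in the last patch, the strings that gve_get_drvinfo() fills in (driver name, version, bus info) are exactly what `ethtool -i` prints, and they can also be read from userspace with the ETHTOOL_GDRVINFO ioctl. The sketch below is a minimal illustration, not part of the patch series; the interface name "eth0" is a placeholder, and on a VM running this driver the output should show `gve` as the driver and the GVE_VERSION string as the version.

```c
/* Query driver name/version the same way `ethtool -i` does, via the
 * ETHTOOL_GDRVINFO ioctl. "eth0" is a placeholder interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&drvinfo;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("driver=%s version=%s bus=%s\n",
		       drvinfo.driver, drvinfo.version, drvinfo.bus_info);

	close(fd);
	return 0;
}
```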