gve: Add XDP support for GQI-QPL format
Add support for the XDP DROP, PASS, TX, and REDIRECT actions for the
GQI-QPL format. Add AF_XDP zero-copy support.

When an XDP program is installed, dedicated TX queues are created to
handle XDP traffic. The user must ensure that the number of configured
TX queues equals the number of configured RX queues, and that the
number of configured TX/RX queues is at most half the maximum number
of TX/RX queues.
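
A minimal sketch of that constraint check, assuming only the gve_priv
fields visible in the gve.h diff below (the helper name
gve_xdp_queues_ok() and its placement are illustrative, not the
driver's actual code):

/* Illustrative: XDP needs one dedicated TX queue per RX queue, so the
 * configured counts must match and must leave half of the maximum
 * queues free for the XDP TX queues.
 */
static bool gve_xdp_queues_ok(const struct gve_priv *priv)
{
	if (priv->tx_cfg.num_queues != priv->rx_cfg.num_queues)
		return false;

	return priv->tx_cfg.num_queues <= priv->tx_cfg.max_queues / 2 &&
	       priv->rx_cfg.num_queues <= priv->rx_cfg.max_queues / 2;
}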

The XDP traffic from AF_XDP sockets and from other NICs (arriving via
XDP_REDIRECT) will also egress through the dedicated XDP TX queues.

Although these changes support AF_XDP sockets in zero-copy mode, a copy
still happens within the driver between the XSK buffer pool and the QPL
bounce buffers of the GQI-QPL format.
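
A rough sketch of that remaining copy on the TX path, assuming an XSK
descriptor as input. xsk_buff_raw_get_data() is the real XSK pool API;
qpl_bounce_buf_reserve() is a hypothetical stand-in for the driver's
TX FIFO code:

/* Sketch only: even in zero-copy mode the payload must land in the
 * pre-registered QPL pages before the NIC can DMA it.
 */
static void gve_xsk_copy_to_qpl(struct gve_tx_ring *tx,
				const struct xdp_desc *desc)
{
	void *src = xsk_buff_raw_get_data(tx->xsk_pool, desc->addr);
	void *dst = qpl_bounce_buf_reserve(tx, desc->len); /* hypothetical */

	memcpy(dst, src, desc->len); /* the copy noted above */
}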

The following example demonstrates how the XDP packets are mapped to
TX queues:

Example configuration:
Max RX queues : 2N, Max TX queues : 2N
Configured RX queues : N, Configured TX queues : N

TX queue mapping:
TX queues with queue id 0,...,N-1 will handle traffic from the stack.
TX queues with queue id N,...,2N-1 will handle XDP traffic.

For the XDP packets transmitted using XDP_TX action:
<Egress TX queue id> = N + <Ingress RX queue id>

For the XDP packets that arrive from other NICs via XDP_REDIRECT action:
<Egress TX queue id> = N + ( smp_processor_id % N )

For AF_XDP zero-copy mode:
<Egress TX queue id> = N + <AF_XDP TX queue id>
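
As a concrete instance with N = 4 (TX queues 0-3 serve the stack, TX
queues 4-7 serve XDP): an XDP_TX packet received on RX queue 2 egresses
on TX queue 4 + 2 = 6; a packet redirected from another NIC while
running on CPU 9 egresses on TX queue 4 + (9 % 4) = 5; and AF_XDP
traffic on XSK TX queue 1 egresses on TX queue 4 + 1 = 5.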

---

Updated version to 1.4.0rc1
praveenkaligineedi committed Mar 24, 2023
1 parent 6882374 commit 9d73cf4
Showing 12 changed files with 1,667 additions and 139 deletions.
112 changes: 95 additions & 17 deletions google/gve/gve.h
@@ -47,6 +47,10 @@

#define GVE_RX_BUFFER_SIZE_DQO 2048

#define GVE_XDP_ACTIONS 5

#define GVE_TX_MAX_HEADER_SIZE 182

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -230,14 +234,23 @@ struct gve_rx_ring {
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */

u64 xdp_tx_errors;
u64 xdp_redirect_errors;
u64 xdp_alloc_fails;
u64 xdp_actions[GVE_XDP_ACTIONS];
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
dma_addr_t q_resources_bus; /* dma address for the queue resources */
struct u64_stats_sync statss; /* sync stats for 32bit archs */

struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};

/* A TX desc ring entry */
@@ -258,7 +271,14 @@ struct gve_tx_iovec {
* ring entry but only used for a pkt_desc not a seg_desc
*/
struct gve_tx_buffer_state {
struct sk_buff *skb; /* skb for this pkt */
union {
struct sk_buff *skb; /* skb for this pkt */
struct xdp_frame *xdp_frame; /* xdp_frame */
};
struct {
u16 size; /* size of xmitted xdp pkt */
u8 is_xsk; /* xsk buff */
} xdp;
union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
struct {
@@ -373,6 +393,8 @@ struct gve_tx_ring {
struct {
/* Spinlock for when cleanup in progress */
spinlock_t clean_lock;
/* Spinlock for XDP tx traffic */
spinlock_t xdp_lock;
};

/* DQO fields. */
@@ -450,6 +472,12 @@ struct gve_tx_ring {
dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
u32 xdp_xsk_wakeup;
u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
@@ -526,9 +554,11 @@ struct gve_priv {
u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */

u16 num_xdp_queues;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
struct gve_qpl_config qpl_cfg; /* map used QPL ids */
@@ -785,7 +815,17 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0;

return priv->tx_cfg.num_queues;
return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists
*/
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0;

return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists
@@ -798,16 +838,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
return priv->rx_cfg.num_queues;
}

/* TX QPL ids map 1:1 to TX queue ids, and RX QPL ids start after the
 * maximum number of TX queues, so TX (including XDP) and RX queues use
 * non-overlapping QPLs.
 */
static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
{
return gve_rx_qpl_id(priv, 0);
}

/* Returns a pointer to the next available tx qpl in the list of qpls
*/
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
{
int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
priv->qpl_cfg.qpl_map_size);
int id = gve_tx_qpl_id(priv, tx_qid);

/* we are out of tx qpls */
if (id >= gve_num_tx_qpls(priv))
/* QPL already in use */
if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL;

set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -817,14 +876,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
/* Returns a pointer to the next available rx qpl in the list of qpls
*/
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
{
int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
priv->qpl_cfg.qpl_map_size,
gve_num_tx_qpls(priv));
int id = gve_rx_qpl_id(priv, rx_qid);

/* we are out of rx qpls */
if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
/* QPL already in use */
if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL;

set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -843,7 +900,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id)
{
if (id < gve_num_tx_qpls(priv))
if (id < gve_rx_start_qpl_id(priv))
return DMA_TO_DEVICE;
else
return DMA_FROM_DEVICE;
@@ -855,6 +912,21 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
return gve_xdp_tx_queue_id(priv, 0);
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
@@ -863,9 +935,15 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
8 changes: 4 additions & 4 deletions google/gve/gve_adminq.c
@@ -516,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
int err;
int i;

for (i = 0; i < num_queues; i++) {
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
return err;
@@ -604,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
int err;
int i;

for (i = 0; i < num_queues; i++) {
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
return err;
4 changes: 2 additions & 2 deletions google/gve/gve_adminq.h
@@ -410,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,