Revert "i40e: xsk: Move tmp desc array from driver to pool"

This reverts commit 761b4fa752, which is
commit d1bc532e99becf104635ed4da6fefa306f452321 upstream.

It breaks the Android kernel ABI and is not needed for Android devices,
so it is safe to revert for now. If it is determined that it is needed
in the future, it can be brought back in an ABI-preserving way.

Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Id228897a297ea667590bf555928722e3585ccbef
Author: Greg Kroah-Hartman
Date:   2022-08-08 12:59:57 +02:00
Commit: e2717b85d0
Parent: 3be14b0d34

 8 files changed, 30 insertions(+), 24 deletions(-)
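
For context, the pieces that presumably matter for the frozen Android ABI are visible in the hunks below: the upstream commit added a tx_descs pointer to struct xsk_buff_pool and dropped the descriptor-array argument from the exported xsk_tx_peek_release_desc_batch(); this revert removes that member again and restores the three-argument form, with the temporary array owned by the driver ring as tx_ring->xsk_descs. As an illustration of the post-revert calling convention only, here is a minimal userspace model; it is not kernel code, the struct is a simplified stand-in, and model_tx_peek_release_desc_batch() merely fabricates descriptors so the caller-owned scratch array can be shown.

/*
 * Illustrative userspace model only -- NOT kernel code.  Names mirror the
 * kernel ones but the types are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct xdp_desc {
        uint64_t addr;
        uint32_t len;
        uint32_t options;
};

/* Post-revert convention: the caller passes in its own scratch array, so the
 * buffer pool needs no tx_descs member. */
static uint32_t model_tx_peek_release_desc_batch(struct xdp_desc *descs, uint32_t max)
{
        uint32_t i;

        for (i = 0; i < max; i++) {             /* pretend every slot holds a frame */
                descs[i].addr = (uint64_t)i * 2048;     /* fake umem offsets */
                descs[i].len = 64;
                descs[i].options = 0;
        }
        return max;
}

int main(void)
{
        uint32_t budget = 4, nb_pkts, i;
        struct xdp_desc *xsk_descs;     /* per-ring scratch array, as i40e keeps it */

        xsk_descs = calloc(budget, sizeof(*xsk_descs));
        if (!xsk_descs)
                return 1;

        nb_pkts = model_tx_peek_release_desc_batch(xsk_descs, budget);
        for (i = 0; i < nb_pkts; i++)
                printf("desc %u: addr=%llu len=%u\n", i,
                       (unsigned long long)xsk_descs[i].addr, xsk_descs[i].len);

        free(xsk_descs);
        return 0;
}

With the upstream layout that is being reverted here, the equivalent loop would instead read descriptors out of pool->tx_descs and call xsk_tx_peek_release_desc_batch(pool, budget), which is exactly the struct-member and signature difference the hunks below add back and remove.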

diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -830,6 +830,8 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 	i40e_clean_tx_ring(tx_ring);
 	kfree(tx_ring->tx_bi);
 	tx_ring->tx_bi = NULL;
+	kfree(tx_ring->xsk_descs);
+	tx_ring->xsk_descs = NULL;
 
 	if (tx_ring->desc) {
 		dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -1431,6 +1433,13 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	if (!tx_ring->tx_bi)
 		goto err;
 
+	if (ring_is_xdp(tx_ring)) {
+		tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
+					     GFP_KERNEL);
+		if (!tx_ring->xsk_descs)
+			goto err;
+	}
+
 	u64_stats_init(&tx_ring->syncp);
 
 	/* round up to nearest 4K */
@@ -1454,6 +1463,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	return 0;
 
 err:
+	kfree(tx_ring->xsk_descs);
+	tx_ring->xsk_descs = NULL;
 	kfree(tx_ring->tx_bi);
 	tx_ring->tx_bi = NULL;
 	return -ENOMEM;

diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h

@@ -390,6 +390,7 @@ struct i40e_ring {
 	u16 rx_offset;
 	struct xdp_rxq_info xdp_rxq;
 	struct xsk_buff_pool *xsk_pool;
+	struct xdp_desc *xsk_descs;      /* For storing descriptors in the AF_XDP ZC path */
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)

diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c

@@ -473,11 +473,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
  **/
 static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 {
-	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+	struct xdp_desc *descs = xdp_ring->xsk_descs;
 	u32 nb_pkts, nb_processed = 0;
 	unsigned int total_bytes = 0;
 
-	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
 	if (!nb_pkts)
 		return true;
 

diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h

@@ -13,7 +13,7 @@
 
 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
-u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max);
 void xsk_tx_release(struct xsk_buff_pool *pool);
 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
 					    u16 queue_id);
@@ -129,7 +129,8 @@ static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
 	return false;
 }
 
-static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
+static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc,
+						 u32 max)
 {
 	return 0;
 }

diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h

@@ -60,7 +60,6 @@ struct xsk_buff_pool {
 	 */
 	dma_addr_t *dma_pages;
 	struct xdp_buff_xsk *heads;
-	struct xdp_desc *tx_descs;
 	u64 chunk_mask;
 	u64 addrs_cnt;
 	u32 free_list_cnt;

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c

@@ -358,9 +358,9 @@ out:
 }
 EXPORT_SYMBOL(xsk_tx_peek_desc);
 
-static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
+static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
+					u32 max_entries)
 {
-	struct xdp_desc *descs = pool->tx_descs;
 	u32 nb_pkts = 0;
 
 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
@@ -370,7 +370,8 @@ static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
 	return nb_pkts;
 }
 
-u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
+				   u32 max_entries)
 {
 	struct xdp_sock *xs;
 	u32 nb_pkts;
@@ -379,7 +380,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
 	if (!list_is_singular(&pool->xsk_tx_list)) {
 		/* Fallback to the non-batched version */
 		rcu_read_unlock();
-		return xsk_tx_peek_release_fallback(pool, max_entries);
+		return xsk_tx_peek_release_fallback(pool, descs, max_entries);
 	}
 
 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
@@ -388,7 +389,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
 		goto out;
 	}
 
-	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries);
+	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
 	if (!nb_pkts) {
 		xs->tx->queue_empty_descs++;
 		goto out;
@@ -400,7 +401,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
 	 * packets. This avoids having to implement any buffering in
 	 * the Tx path.
 	 */
-	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
+	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
 	if (!nb_pkts)
 		goto out;

diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c

@@ -37,7 +37,6 @@ void xp_destroy(struct xsk_buff_pool *pool)
 	if (!pool)
 		return;
 
-	kvfree(pool->tx_descs);
 	kvfree(pool->heads);
 	kvfree(pool);
 }
@@ -58,12 +57,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	if (!pool->heads)
 		goto out;
 
-	if (xs->tx) {
-		pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
-		if (!pool->tx_descs)
-			goto out;
-	}
-
 	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
 	pool->addrs_cnt = umem->size;
 	pool->heads_cnt = umem->chunks;

diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h

@@ -201,11 +201,11 @@ static inline bool xskq_cons_read_desc(struct xsk_queue *q,
 	return false;
 }
 
-static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
-					    u32 max)
+static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
+					    struct xdp_desc *descs,
+					    struct xsk_buff_pool *pool, u32 max)
 {
 	u32 cached_cons = q->cached_cons, nb_entries = 0;
-	struct xdp_desc *descs = pool->tx_descs;
 
 	while (cached_cons != q->cached_prod && nb_entries < max) {
 		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
@@ -278,12 +278,12 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
 	return xskq_cons_read_desc(q, desc, pool);
 }
 
-static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
-					    u32 max)
+static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
+					    struct xsk_buff_pool *pool, u32 max)
 {
 	u32 entries = xskq_cons_nb_entries(q, max);
 
-	return xskq_cons_read_desc_batch(q, pool, entries);
+	return xskq_cons_read_desc_batch(q, descs, pool, entries);
 }
 
 /* To improve performance in the xskq_cons_release functions, only update local state here.