diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f1d2dea90a8c84ba30644908996e12bcc4072f90..5975521a4c8614d84f638856dda266071a27180b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -379,40 +379,35 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
 		     (pfvf->hw.cq_ecount_wait - 1));
 }
 
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-			   gfp_t gfp)
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
 {
 	dma_addr_t iova;
+	u8 *buf;
 
-	/* Check if request can be accommodated in previous allocated page */
-	if (pool->page && ((pool->page_offset + pool->rbsize) <=
-	    (PAGE_SIZE << pool->rbpage_order))) {
-		pool->pageref++;
-		goto ret;
-	}
-
-	otx2_get_page(pool);
-
-	/* Allocate a new page */
-	pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				 pool->rbpage_order);
-	if (unlikely(!pool->page))
+	buf = napi_alloc_frag(pool->rbsize);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	pool->page_offset = 0;
-ret:
-	iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
-				      pool->rbsize, DMA_FROM_DEVICE);
-	if (!iova) {
-		if (!pool->page_offset)
-			__free_pages(pool->page, pool->rbpage_order);
-		pool->page = NULL;
+	iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+		page_frag_free(buf);
 		return -ENOMEM;
 	}
-	pool->page_offset += pool->rbsize;
+
 	return iova;
 }
 
+static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+{
+	dma_addr_t addr;
+
+	local_bh_disable();
+	addr = __otx2_alloc_rbuf(pfvf, pool);
+	local_bh_enable();
+	return addr;
+}
+
 void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
 {
 	struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -805,7 +800,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
 	free_ptrs = cq->pool_ptrs;
 
 	while (cq->pool_ptrs) {
-		bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+		bufptr = otx2_alloc_rbuf(pfvf, rbpool);
 		if (bufptr <= 0) {
 			/* Schedule a WQ if we fails to free atleast half of the
 			 * pointers else enable napi for this RQ.
@@ -1064,7 +1059,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
 		return err;
 
 	pool->rbsize = buf_size;
-	pool->rbpage_order = get_order(buf_size);
 
 	/* Initialize this pool's context via AF */
 	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
@@ -1152,13 +1146,12 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 			return -ENOMEM;
 
 		for (ptr = 0; ptr < num_sqbs; ptr++) {
-			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+			bufptr = otx2_alloc_rbuf(pfvf, pool);
 			if (bufptr <= 0)
 				return bufptr;
 			otx2_aura_freeptr(pfvf, pool_id, bufptr);
 			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
 		}
-		otx2_get_page(pool);
 	}
 
 	return 0;
@@ -1204,13 +1197,12 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		pool = &pfvf->qset.pool[pool_id];
 		for (ptr = 0; ptr < num_ptrs; ptr++) {
-			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+			bufptr = otx2_alloc_rbuf(pfvf, pool);
 			if (bufptr <= 0)
 				return bufptr;
 			otx2_aura_freeptr(pfvf, pool_id,
 					  bufptr + OTX2_HEAD_ROOM);
 		}
-		otx2_get_page(pool);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 0b1c653b3449fb44d980f2b4cb2f161f373c3f16..2fa29889522eba10bd0bd33a68da3e2dd67ef1cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -434,18 +434,6 @@ static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
 		     otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
 }
 
-/* Update page ref count */
-static inline void otx2_get_page(struct otx2_pool *pool)
-{
-	if (!pool->page)
-		return;
-
-	if (pool->pageref)
-		page_ref_add(pool->page, pool->pageref);
-	pool->pageref = 0;
-	pool->page = NULL;
-}
-
 static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
 {
 	if (type == AURA_NIX_SQ)
@@ -589,8 +577,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
 int otx2_txsch_alloc(struct otx2_nic *pfvf);
 int otx2_txschq_stop(struct otx2_nic *pfvf);
 void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-			   gfp_t gfp);
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
 void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 45abe0cd0e7b584ed620b63d6c314c555252eba5..b04f5429d72d911da145ee13b99506d84a9d7925 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -286,7 +286,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 
 	/* Refill pool with new buffers */
 	while (cq->pool_ptrs) {
-		bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+		bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
 		if (unlikely(bufptr <= 0)) {
 			struct refill_work *work;
 			struct delayed_work *dwork;
@@ -304,7 +304,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
 		cq->pool_ptrs--;
 	}
-	otx2_get_page(cq->rbpool);
 
 	return processed_cqe;
 }
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 4ab32d3adb780217090716f91c2c0b29bad80b1b..da97f2d4416fadfe7e4f1a53e68e342525cea577 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -113,11 +113,7 @@ struct otx2_cq_poll {
 struct otx2_pool {
 	struct qmem		*stack;
 	struct qmem		*fc_addr;
-	u8			rbpage_order;
 	u16			rbsize;
-	u32			page_offset;
-	u16			pageref;
-	struct page		*page;
 };
 
 struct otx2_cq_queue {
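For readers following the refill path, here is a minimal standalone sketch of the allocation pattern this patch switches to; the example_* names and the bare struct device * parameter are illustrative only, not part of the driver. napi_alloc_frag() carves receive buffers out of a per-CPU page-frag cache, so it is only safe in softirq (NAPI) context or with bottom halves disabled, which is why the patch adds a local_bh_disable()/local_bh_enable() wrapper for process-context callers; a buffer whose DMA mapping fails is handed back to the cache with page_frag_free().

/* Sketch only; hypothetical helper names, not the driver's API. */
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/bottom_half.h>

/* Softirq-context allocation, mirroring __otx2_alloc_rbuf() above. */
static dma_addr_t example_alloc_rbuf(struct device *dev, u16 rbsize)
{
	dma_addr_t iova;
	u8 *buf;

	/* Per-CPU page-frag cache; the allocator manages page refcounts. */
	buf = napi_alloc_frag(rbsize);
	if (unlikely(!buf))
		return -ENOMEM;

	/* Map for device writes; CPU sync is deferred until CQE processing. */
	iova = dma_map_single_attrs(dev, buf, rbsize, DMA_FROM_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(dev, iova))) {
		page_frag_free(buf);	/* drop the frag's page reference */
		return -ENOMEM;
	}

	return iova;
}

/* Process-context wrapper: BHs must be off around the per-CPU cache. */
static dma_addr_t example_alloc_rbuf_any(struct device *dev, u16 rbsize)
{
	dma_addr_t addr;

	local_bh_disable();
	addr = example_alloc_rbuf(dev, rbsize);
	local_bh_enable();
	return addr;
}

As in the patch, the error is smuggled through the unsigned dma_addr_t return value and callers test bufptr <= 0. Moving the refcount bookkeeping into the page-frag allocator is what lets the patch delete otx2_get_page() and the rbpage_order, page_offset, pageref and page fields of struct otx2_pool.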