// SPDX-License-Identifier: BSD-2-Clause-Views
/*
 * Copyright (c) 2019-2023 The Regents of the University of California
 */

#include "mqnic.h"

struct mqnic_ring *mqnic_create_rx_ring(struct mqnic_if *interface)
{
	struct mqnic_ring *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	ring->dev = interface->dev;
	ring->interface = interface;

	ring->index = -1;
	ring->enabled = 0;

	ring->hw_addr = NULL;

	ring->prod_ptr = 0;
	ring->cons_ptr = 0;

	return ring;
}

void mqnic_destroy_rx_ring(struct mqnic_ring *ring)
{
	mqnic_close_rx_ring(ring);

	kfree(ring);
}
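
/*
 * Open an RX ring: claim a hardware queue index, allocate the descriptor
 * ring and per-entry bookkeeping, program the queue registers (base address,
 * size, CQN, pointers) and pre-fill the ring with RX buffers. The queue is
 * left disabled; mqnic_enable_rx_ring() activates it.
 */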
int mqnic_open_rx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv,
		struct mqnic_cq *cq, int size, int desc_block_size)
{
	int ret = 0;

	if (ring->enabled || ring->hw_addr || ring->buf || !priv || !cq)
		return -EINVAL;

	ring->index = mqnic_res_alloc(ring->interface->rxq_res);
	if (ring->index < 0)
		return -ENOMEM;

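	// ceil(log2(desc_block_size)), stored in log form; the block size itself
	// is then rounded up to the corresponding power of two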
	ring->log_desc_block_size = desc_block_size < 2 ? 0 : ilog2(desc_block_size - 1) + 1;
	ring->desc_block_size = 1 << ring->log_desc_block_size;

	ring->size = roundup_pow_of_two(size);
	ring->full_size = ring->size >> 1;
	ring->size_mask = ring->size - 1;
	ring->stride = roundup_pow_of_two(MQNIC_DESC_SIZE * ring->desc_block_size);

	ring->rx_info = kvzalloc(sizeof(*ring->rx_info) * ring->size, GFP_KERNEL);
	if (!ring->rx_info) {
		ret = -ENOMEM;
		goto fail;
	}

	ring->buf_size = ring->size * ring->stride;
	ring->buf = dma_alloc_coherent(ring->dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
	if (!ring->buf) {
		ret = -ENOMEM;
		goto fail;
	}

	ring->priv = priv;
	ring->cq = cq;
	cq->src_ring = ring;
	cq->handler = mqnic_rx_irq;

	ring->hw_addr = mqnic_res_get_addr(ring->interface->rxq_res, ring->index);

	ring->prod_ptr = 0;
	ring->cons_ptr = 0;

	// deactivate queue
	iowrite32(MQNIC_QUEUE_CMD_SET_ENABLE | 0,
			ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);
	// set base address
	iowrite32((ring->buf_dma_addr & 0xfffff000),
			ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_VF_REG + 0);
	iowrite32(ring->buf_dma_addr >> 32,
			ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_VF_REG + 4);
	// set size
	iowrite32(MQNIC_QUEUE_CMD_SET_SIZE | ilog2(ring->size) | (ring->log_desc_block_size << 8),
			ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);
	// set CQN
	iowrite32(MQNIC_QUEUE_CMD_SET_CQN | ring->cq->cqn,
			ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);
	// set pointers
	iowrite32(MQNIC_QUEUE_CMD_SET_PROD_PTR | (ring->prod_ptr & MQNIC_QUEUE_PTR_MASK),
			ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);
	iowrite32(MQNIC_QUEUE_CMD_SET_CONS_PTR | (ring->cons_ptr & MQNIC_QUEUE_PTR_MASK),
			ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);

	ret = mqnic_refill_rx_buffers(ring);
	if (ret) {
		netdev_err(priv->ndev, "failed to allocate RX buffer for RX queue index %d (of %u total) entry index %u (of %u total)",
				ring->index, priv->rxq_count, ring->prod_ptr, ring->size);
		if (ret == -ENOMEM)
			netdev_err(priv->ndev, "machine might not have enough DMA-capable RAM; try to decrease number of RX channels (currently %u) and/or RX ring parameters (entries; currently %u) and/or module parameter \"num_rxq_entries\" (currently %u)",
					priv->rxq_count, ring->size, mqnic_num_rxq_entries);
		goto fail;
	}

	return 0;

fail:
	mqnic_close_rx_ring(ring);
	return ret;
}
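
/*
 * Close an RX ring: disable the queue, detach it from its completion queue,
 * free any posted buffers and the descriptor ring, and release the hardware
 * queue index. Safe to call on a partially opened ring, so it also serves as
 * the error path of mqnic_open_rx_ring().
 */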
void mqnic_close_rx_ring(struct mqnic_ring *ring)
{
	mqnic_disable_rx_ring(ring);

	if (ring->cq) {
		ring->cq->src_ring = NULL;
		ring->cq->handler = NULL;
	}

	ring->priv = NULL;
	ring->cq = NULL;

	ring->hw_addr = NULL;

	if (ring->buf) {
		mqnic_free_rx_buf(ring);

		dma_free_coherent(ring->dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
		ring->buf = NULL;
		ring->buf_dma_addr = 0;
	}

	if (ring->rx_info) {
		kvfree(ring->rx_info);
		ring->rx_info = NULL;
	}

	mqnic_res_free(ring->interface->rxq_res, ring->index);
	ring->index = -1;
}

int mqnic_enable_rx_ring(struct mqnic_ring *ring)
{
	if (!ring->hw_addr)
		return -EINVAL;

	// enable queue
	iowrite32(MQNIC_QUEUE_CMD_SET_ENABLE | 1,
			ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);

	ring->enabled = 1;

	return 0;
}

void mqnic_disable_rx_ring(struct mqnic_ring *ring)
{
	// disable queue
	if (ring->hw_addr) {
		iowrite32(MQNIC_QUEUE_CMD_SET_ENABLE | 0,
				ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);
	}

	ring->enabled = 0;
}

bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring)
{
	return ring->prod_ptr == ring->cons_ptr;
}

bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring)
{
	return ring->prod_ptr - ring->cons_ptr >= ring->size;
}
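
/*
 * prod_ptr and cons_ptr are free-running counters: their difference is the
 * ring occupancy even across wrap-around, and they are masked with
 * MQNIC_QUEUE_PTR_MASK whenever they are exchanged with the hardware.
 */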
void mqnic_rx_read_cons_ptr(struct mqnic_ring *ring)
{
	ring->cons_ptr += ((ioread32(ring->hw_addr + MQNIC_QUEUE_PTR_REG) >> 16) - ring->cons_ptr) & MQNIC_QUEUE_PTR_MASK;
}

void mqnic_rx_write_prod_ptr(struct mqnic_ring *ring)
{
	iowrite32(MQNIC_QUEUE_CMD_SET_PROD_PTR | (ring->prod_ptr & MQNIC_QUEUE_PTR_MASK),
			ring->hw_addr + MQNIC_QUEUE_CTRL_STATUS_REG);
}

void mqnic_free_rx_desc(struct mqnic_ring *ring, int index)
{
	struct mqnic_rx_info *rx_info = &ring->rx_info[index];

	if (!rx_info->page)
		return;

	dma_unmap_page(ring->dev, dma_unmap_addr(rx_info, dma_addr),
			dma_unmap_len(rx_info, len), DMA_FROM_DEVICE);
	rx_info->dma_addr = 0;
	__free_pages(rx_info->page, rx_info->page_order);
	rx_info->page = NULL;
}

int mqnic_free_rx_buf(struct mqnic_ring *ring)
{
	u32 index;
	int cnt = 0;

	while (!mqnic_is_rx_ring_empty(ring)) {
		index = ring->cons_ptr & ring->size_mask;
		mqnic_free_rx_desc(ring, index);
		ring->cons_ptr++;
		cnt++;
	}

	return cnt;
}
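
/*
 * Allocate and DMA-map a fresh page for one ring entry and write the
 * corresponding receive descriptor. Returns 0 on success, -ENOMEM if the
 * page allocation fails, and -1 if the entry is still occupied or the DMA
 * mapping fails.
 */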
int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index)
{
	struct mqnic_rx_info *rx_info = &ring->rx_info[index];
	struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);
	struct page *page = rx_info->page;
	u32 page_order = ring->page_order;
	u32 len = PAGE_SIZE << page_order;
	dma_addr_t dma_addr;

	if (unlikely(page)) {
		dev_err(ring->dev, "%s: skb not yet processed on interface %d",
				__func__, ring->interface->index);
		return -1;
	}

	page = dev_alloc_pages(page_order);
	if (unlikely(!page)) {
		dev_err(ring->dev, "%s: failed to allocate memory on interface %d",
				__func__, ring->interface->index);
		return -ENOMEM;
	}

	// map page
	dma_addr = dma_map_page(ring->dev, page, 0, len, DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ring->dev, dma_addr))) {
		dev_err(ring->dev, "%s: DMA mapping failed on interface %d",
				__func__, ring->interface->index);
		__free_pages(page, page_order);
		return -1;
	}

	// write descriptor
	rx_desc->len = cpu_to_le32(len);
	rx_desc->addr = cpu_to_le64(dma_addr);

	// update rx_info
	rx_info->page = page;
	rx_info->page_order = page_order;
	rx_info->page_offset = 0;
	rx_info->dma_addr = dma_addr;
	rx_info->len = len;

	return 0;
}
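
/*
 * Post fresh RX buffers for every empty slot in the ring. The refill is
 * batched: nothing is done until at least 8 slots are free, which keeps the
 * descriptor writes and the producer-pointer doorbell update amortized over
 * several received packets.
 */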
int mqnic_refill_rx_buffers(struct mqnic_ring *ring)
{
	u32 missing = ring->size - (ring->prod_ptr - ring->cons_ptr);
	int ret = 0;

	if (missing < 8)
		return 0;

	for (; missing-- > 0;) {
		ret = mqnic_prepare_rx_desc(ring, ring->prod_ptr & ring->size_mask);
		if (ret)
			break;
		ring->prod_ptr++;
	}

	// enqueue on NIC
	dma_wmb();
	mqnic_rx_write_prod_ptr(ring);

	return ret;
}
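
/*
 * Process received packets for one completion queue. Completions are
 * consumed until the NAPI budget is exhausted or an entry with a stale phase
 * bit is reached; each completed page is handed to the stack via
 * napi_gro_frags(). Afterwards the RX ring consumer pointer is advanced past
 * entries whose pages have been consumed, and the ring is refilled.
 */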
int mqnic_process_rx_cq(struct mqnic_cq *cq, int napi_budget)
{
	struct mqnic_if *interface = cq->interface;
	struct device *dev = interface->dev;
	struct mqnic_ring *rx_ring = cq->src_ring;
	struct mqnic_priv *priv = rx_ring->priv;
	struct mqnic_rx_info *rx_info;
	struct mqnic_cpl *cpl;
	struct sk_buff *skb;
	struct page *page;
	u32 cq_index;
	u32 cq_cons_ptr;
	u32 ring_index;
	u32 ring_cons_ptr;
	int done = 0;
	int budget = napi_budget;
	u32 len;

	if (unlikely(!priv || !priv->port_up))
		return done;

	// process completion queue
	cq_cons_ptr = cq->cons_ptr;
	cq_index = cq_cons_ptr & cq->size_mask;

	while (done < budget) {
		cpl = (struct mqnic_cpl *)(cq->buf + cq_index * cq->stride);

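		// the hardware inverts the phase bit on every pass through the CQ;
		// an entry is new only while its phase bit differs from the consumer
		// pointer's wrap parity (the cq->size bit), so stop when they match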
		if (!!(cpl->phase & cpu_to_le32(0x80000000)) == !!(cq_cons_ptr & cq->size))
			break;

		dma_rmb();

		ring_index = le16_to_cpu(cpl->index) & rx_ring->size_mask;
		rx_info = &rx_ring->rx_info[ring_index];
		page = rx_info->page;

		len = min_t(u32, le16_to_cpu(cpl->len), rx_info->len);

		if (len < ETH_HLEN) {
			netdev_warn(priv->ndev, "%s: ring %d dropping short frame (length %d)",
					__func__, rx_ring->index, len);
			rx_ring->dropped_packets++;
			goto rx_drop;
		}

		if (unlikely(!page)) {
			netdev_err(priv->ndev, "%s: ring %d null page at index %d",
					__func__, rx_ring->index, ring_index);
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
					cpl, MQNIC_CPL_SIZE, true);
			break;
		}

		skb = napi_get_frags(&cq->napi);
		if (unlikely(!skb)) {
			netdev_err(priv->ndev, "%s: ring %d failed to allocate skb",
					__func__, rx_ring->index);
			break;
		}

		// RX hardware timestamp
		if (interface->if_features & MQNIC_IF_FEATURE_PTP_TS)
			skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(interface->mdev, rx_ring, cpl);

		skb_record_rx_queue(skb, rx_ring->index);

		// RX hardware checksum
		if (priv->ndev->features & NETIF_F_RXCSUM) {
			skb->csum = csum_unfold((__sum16)cpu_to_be16(le16_to_cpu(cpl->rx_csum)));
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
		// sync the buffer for the CPU and unmap it before the page is handed
		// to the stack (the sync must use the still-valid DMA address)
		dma_sync_single_range_for_cpu(dev, rx_info->dma_addr, rx_info->page_offset,
				rx_info->len, DMA_FROM_DEVICE);
		dma_unmap_page(dev, dma_unmap_addr(rx_info, dma_addr),
				dma_unmap_len(rx_info, len), DMA_FROM_DEVICE);
		rx_info->dma_addr = 0;
		__skb_fill_page_desc(skb, 0, page, rx_info->page_offset, len);
		rx_info->page = NULL;

		skb_shinfo(skb)->nr_frags = 1;
		skb->len = len;
		skb->data_len = len;
		skb->truesize += rx_info->len;

		// hand off SKB
		napi_gro_frags(&cq->napi);

		rx_ring->packets++;
		rx_ring->bytes += le16_to_cpu(cpl->len);

rx_drop:
		done++;

		cq_cons_ptr++;
		cq_index = cq_cons_ptr & cq->size_mask;
	}

	// update CQ consumer pointer
	cq->cons_ptr = cq_cons_ptr;
	mqnic_cq_write_cons_ptr(cq);

	// process ring
	ring_cons_ptr = READ_ONCE(rx_ring->cons_ptr);
	ring_index = ring_cons_ptr & rx_ring->size_mask;

	while (ring_cons_ptr != rx_ring->prod_ptr) {
		rx_info = &rx_ring->rx_info[ring_index];

		if (rx_info->page)
			break;

		ring_cons_ptr++;
		ring_index = ring_cons_ptr & rx_ring->size_mask;
	}

	// update consumer pointer
	WRITE_ONCE(rx_ring->cons_ptr, ring_cons_ptr);

	// replenish buffers
	mqnic_refill_rx_buffers(rx_ring);

	return done;
}

void mqnic_rx_irq(struct mqnic_cq *cq)
{
	napi_schedule_irqoff(&cq->napi);
}

int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mqnic_cq *cq = container_of(napi, struct mqnic_cq, napi);
	int done;

	done = mqnic_process_rx_cq(cq, budget);

	if (done == budget)
		return done;

	napi_complete(napi);

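	// budget not exhausted: re-arm the completion queue interrupt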
	mqnic_arm_cq(cq);

	return done;
}