Advance TX/RX queue pointers based on completion records instead of MMIO reads
Signed-off-by: Alex Forencich <alex@alexforencich.com>
parent 147f09e62e
commit a8feaf2383
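The idea behind the change: instead of reading the queue tail pointer back from the NIC over MMIO, the completion handler clears the per-slot info entry for each finished descriptor, and the tail pointer is then advanced by walking forward over already-cleared slots until the first one still in flight. A minimal, self-contained Python sketch of that bookkeeping follows; the `Ring` class and its method names are illustrative only, not the mqnic code. The diff below applies the actual change to the Python testbench model and the C kernel driver.

# Hypothetical sketch of completion-driven tail advancement (not mqnic code).
class Ring:
    def __init__(self, size):
        self.size = size
        self.size_mask = size - 1
        self.head_ptr = 0              # producer position
        self.tail_ptr = 0              # consumer position, advanced from completions
        self.info = [None] * size      # per-slot state (e.g. packet/skb reference)

    def post(self, item):
        # producer: fill a slot and advance the head pointer
        index = self.head_ptr & self.size_mask
        self.info[index] = item
        self.head_ptr += 1

    def complete(self, index):
        # completion handler: mark a slot as done by clearing its reference
        self.info[index] = None

    def advance_tail(self):
        # consumer: walk forward until the first slot that is still in flight;
        # no MMIO read of a hardware tail pointer is needed
        tail = self.tail_ptr
        index = tail & self.size_mask
        while tail != self.head_ptr:
            if self.info[index]:
                break
            tail += 1
            index = tail & self.size_mask
        self.tail_ptr = tail


if __name__ == "__main__":
    ring = Ring(8)
    for i in range(4):
        ring.post(f"pkt{i}")
    # completions may arrive out of order; the tail only moves past contiguous done slots
    ring.complete(1)
    ring.advance_tail()
    assert ring.tail_ptr == 0
    ring.complete(0)
    ring.advance_tail()
    assert ring.tail_ptr == 2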
@@ -663,7 +663,6 @@ class TxRing:
         self.head_ptr = 0
         self.tail_ptr = 0
-        self.clean_tail_ptr = 0
 
         self.clean_event = Event()
 
@@ -748,10 +747,10 @@ class TxRing:
         self.active = False
 
     def empty(self):
-        return self.head_ptr == self.clean_tail_ptr
+        return self.head_ptr == self.tail_ptr
 
     def full(self):
-        return self.head_ptr - self.clean_tail_ptr >= self.full_size
+        return self.head_ptr - self.tail_ptr >= self.full_size
 
     async def read_tail_ptr(self):
         val = await self.hw_regs.read_dword(MQNIC_QUEUE_TAIL_PTR_REG)
@@ -767,9 +766,9 @@ class TxRing:
 
     def free_buf(self):
         while not self.empty():
-            index = self.clean_tail_ptr & self.size_mask
+            index = self.tail_ptr & self.size_mask
             self.free_desc(index)
-            self.clean_tail_ptr += 1
+            self.tail_ptr += 1
 
     @staticmethod
     async def process_tx_cq(cq):
@@ -805,19 +804,17 @@ class TxRing:
         await cq.write_tail_ptr()
 
         # process ring
-        await ring.read_tail_ptr()
+        ring_tail_ptr = ring.tail_ptr
+        ring_index = ring_tail_ptr & ring.size_mask
 
-        ring_clean_tail_ptr = ring.clean_tail_ptr
-        ring_index = ring_clean_tail_ptr & ring.size_mask
-
-        while (ring_clean_tail_ptr != ring.tail_ptr):
+        while (ring_tail_ptr != ring.head_ptr):
             if ring.tx_info[ring_index]:
                 break
 
-            ring_clean_tail_ptr += 1
-            ring_index = ring_clean_tail_ptr & ring.size_mask
+            ring_tail_ptr += 1
+            ring_index = ring_tail_ptr & ring.size_mask
 
-        ring.clean_tail_ptr = ring_clean_tail_ptr
+        ring.tail_ptr = ring_tail_ptr
 
         ring.clean_event.set()
 
@@ -846,7 +843,6 @@ class RxRing:
 
         self.head_ptr = 0
         self.tail_ptr = 0
-        self.clean_tail_ptr = 0
 
         self.packets = 0
         self.bytes = 0
@@ -931,10 +927,10 @@ class RxRing:
         self.active = False
 
     def empty(self):
-        return self.head_ptr == self.clean_tail_ptr
+        return self.head_ptr == self.tail_ptr
 
     def full(self):
-        return self.head_ptr - self.clean_tail_ptr >= self.full_size
+        return self.head_ptr - self.tail_ptr >= self.full_size
 
     async def read_tail_ptr(self):
         val = await self.hw_regs.read_dword(MQNIC_QUEUE_TAIL_PTR_REG)
@@ -950,9 +946,9 @@ class RxRing:
 
     def free_buf(self):
         while not self.empty():
-            index = self.clean_tail_ptr & self.size_mask
+            index = self.tail_ptr & self.size_mask
             self.free_desc(index)
-            self.clean_tail_ptr += 1
+            self.tail_ptr += 1
 
     def prepare_desc(self, index):
         pkt = self.driver.alloc_pkt()
@@ -969,7 +965,7 @@ class RxRing:
             offset += seg
 
     async def refill_buffers(self):
-        missing = self.size - (self.head_ptr - self.clean_tail_ptr)
+        missing = self.size - (self.head_ptr - self.tail_ptr)
 
         if missing < 8:
             return
@@ -1029,19 +1025,17 @@ class RxRing:
         await cq.write_tail_ptr()
 
         # process ring
-        await ring.read_tail_ptr()
+        ring_tail_ptr = ring.tail_ptr
+        ring_index = ring_tail_ptr & ring.size_mask
 
-        ring_clean_tail_ptr = ring.clean_tail_ptr
-        ring_index = ring_clean_tail_ptr & ring.size_mask
-
-        while (ring_clean_tail_ptr != ring.tail_ptr):
+        while (ring_tail_ptr != ring.head_ptr):
             if ring.rx_info[ring_index]:
                 break
 
-            ring_clean_tail_ptr += 1
-            ring_index = ring_clean_tail_ptr & ring.size_mask
+            ring_tail_ptr += 1
+            ring_index = ring_tail_ptr & ring.size_mask
 
-        ring.clean_tail_ptr = ring_clean_tail_ptr
+        ring.tail_ptr = ring_tail_ptr
 
         # replenish buffers
         await ring.refill_buffers()
@@ -1414,7 +1408,7 @@ class Interface:
 
         while True:
            # check for space in ring
-            if ring.head_ptr - ring.clean_tail_ptr < ring.full_size:
+            if ring.head_ptr - ring.tail_ptr < ring.full_size:
                 break
 
             # wait for space
@@ -251,7 +251,6 @@ struct mqnic_ring
 
         // written from completion
         u32 tail_ptr ____cacheline_aligned_in_smp;
-        u32 clean_tail_ptr;
         u64 ts_s;
         u8 ts_valid;
 
@@ -59,7 +59,6 @@ int mqnic_create_rx_ring(struct mqnic_if *interface, struct mqnic_ring **ring_pt
 
         ring->head_ptr = 0;
         ring->tail_ptr = 0;
-        ring->clean_tail_ptr = 0;
 
         // deactivate queue
         iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -105,7 +104,6 @@ int mqnic_alloc_rx_ring(struct mqnic_ring *ring, int size, int stride)
 
         ring->head_ptr = 0;
         ring->tail_ptr = 0;
-        ring->clean_tail_ptr = 0;
 
         // deactivate queue
         iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -199,12 +197,12 @@ void mqnic_deactivate_rx_ring(struct mqnic_ring *ring)
 
 bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring)
 {
-        return ring->head_ptr == ring->clean_tail_ptr;
+        return ring->head_ptr == ring->tail_ptr;
 }
 
 bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring)
 {
-        return ring->head_ptr - ring->clean_tail_ptr >= ring->size;
+        return ring->head_ptr - ring->tail_ptr >= ring->size;
 }
 
 void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring)
@@ -235,16 +233,12 @@ int mqnic_free_rx_buf(struct mqnic_ring *ring)
         int cnt = 0;
 
         while (!mqnic_is_rx_ring_empty(ring)) {
-                index = ring->clean_tail_ptr & ring->size_mask;
+                index = ring->tail_ptr & ring->size_mask;
                 mqnic_free_rx_desc(ring, index);
-                ring->clean_tail_ptr++;
+                ring->tail_ptr++;
                 cnt++;
         }
 
-        ring->head_ptr = 0;
-        ring->tail_ptr = 0;
-        ring->clean_tail_ptr = 0;
-
         return cnt;
 }
 
@@ -296,7 +290,7 @@ int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index)
 
 void mqnic_refill_rx_buffers(struct mqnic_ring *ring)
 {
-        u32 missing = ring->size - (ring->head_ptr - ring->clean_tail_ptr);
+        u32 missing = ring->size - (ring->head_ptr - ring->tail_ptr);
 
         if (missing < 8)
                 return;
@@ -325,7 +319,7 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
         u32 cq_index;
         u32 cq_tail_ptr;
         u32 ring_index;
-        u32 ring_clean_tail_ptr;
+        u32 ring_tail_ptr;
         int done = 0;
         int budget = napi_budget;
         u32 len;
@@ -410,24 +404,21 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
         mqnic_cq_write_tail_ptr(cq_ring);
 
         // process ring
-        // read tail pointer from NIC
-        mqnic_rx_read_tail_ptr(rx_ring);
+        ring_tail_ptr = READ_ONCE(rx_ring->tail_ptr);
+        ring_index = ring_tail_ptr & rx_ring->size_mask;
 
-        ring_clean_tail_ptr = READ_ONCE(rx_ring->clean_tail_ptr);
-        ring_index = ring_clean_tail_ptr & rx_ring->size_mask;
-
-        while (ring_clean_tail_ptr != rx_ring->tail_ptr) {
+        while (ring_tail_ptr != rx_ring->head_ptr) {
                 rx_info = &rx_ring->rx_info[ring_index];
 
                 if (rx_info->page)
                         break;
 
-                ring_clean_tail_ptr++;
-                ring_index = ring_clean_tail_ptr & rx_ring->size_mask;
+                ring_tail_ptr++;
+                ring_index = ring_tail_ptr & rx_ring->size_mask;
         }
 
         // update ring tail
-        WRITE_ONCE(rx_ring->clean_tail_ptr, ring_clean_tail_ptr);
+        WRITE_ONCE(rx_ring->tail_ptr, ring_tail_ptr);
 
         // replenish buffers
         mqnic_refill_rx_buffers(rx_ring);
@@ -60,7 +60,6 @@ int mqnic_create_tx_ring(struct mqnic_if *interface, struct mqnic_ring **ring_pt
 
         ring->head_ptr = 0;
         ring->tail_ptr = 0;
-        ring->clean_tail_ptr = 0;
 
         // deactivate queue
         iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -107,7 +106,6 @@ int mqnic_alloc_tx_ring(struct mqnic_ring *ring, int size, int stride)
 
         ring->head_ptr = 0;
         ring->tail_ptr = 0;
-        ring->clean_tail_ptr = 0;
 
         // deactivate queue
         iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -199,12 +197,12 @@ void mqnic_deactivate_tx_ring(struct mqnic_ring *ring)
 
 bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring)
 {
-        return ring->head_ptr == ring->clean_tail_ptr;
+        return ring->head_ptr == ring->tail_ptr;
 }
 
 bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring)
 {
-        return ring->head_ptr - ring->clean_tail_ptr >= ring->full_size;
+        return ring->head_ptr - ring->tail_ptr >= ring->full_size;
 }
 
 void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring)
@@ -244,16 +242,12 @@ int mqnic_free_tx_buf(struct mqnic_ring *ring)
         int cnt = 0;
 
         while (!mqnic_is_tx_ring_empty(ring)) {
-                index = ring->clean_tail_ptr & ring->size_mask;
+                index = ring->tail_ptr & ring->size_mask;
                 mqnic_free_tx_desc(ring, index, 0);
-                ring->clean_tail_ptr++;
+                ring->tail_ptr++;
                 cnt++;
         }
 
-        ring->head_ptr = 0;
-        ring->tail_ptr = 0;
-        ring->clean_tail_ptr = 0;
-
         return cnt;
 }
 
@@ -268,7 +262,7 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
         u32 cq_index;
         u32 cq_tail_ptr;
         u32 ring_index;
-        u32 ring_clean_tail_ptr;
+        u32 ring_tail_ptr;
         u32 packets = 0;
         u32 bytes = 0;
         int done = 0;
@@ -315,24 +309,21 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
         mqnic_cq_write_tail_ptr(cq_ring);
 
         // process ring
-        // read tail pointer from NIC
-        mqnic_tx_read_tail_ptr(tx_ring);
+        ring_tail_ptr = READ_ONCE(tx_ring->tail_ptr);
+        ring_index = ring_tail_ptr & tx_ring->size_mask;
 
-        ring_clean_tail_ptr = READ_ONCE(tx_ring->clean_tail_ptr);
-        ring_index = ring_clean_tail_ptr & tx_ring->size_mask;
-
-        while (ring_clean_tail_ptr != tx_ring->tail_ptr) {
+        while (ring_tail_ptr != tx_ring->head_ptr) {
                 tx_info = &tx_ring->tx_info[ring_index];
 
                 if (tx_info->skb)
                         break;
 
-                ring_clean_tail_ptr++;
-                ring_index = ring_clean_tail_ptr & tx_ring->size_mask;
+                ring_tail_ptr++;
+                ring_index = ring_tail_ptr & tx_ring->size_mask;
         }
 
         // update ring tail
-        WRITE_ONCE(tx_ring->clean_tail_ptr, ring_clean_tail_ptr);
+        WRITE_ONCE(tx_ring->tail_ptr, ring_tail_ptr);
 
         // BQL
         //netdev_tx_completed_queue(tx_ring->tx_queue, packets, bytes);
@@ -445,7 +436,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         int ring_index;
         u32 index;
         bool stop_queue;
-        u32 clean_tail_ptr;
+        u32 tail_ptr;
 
         if (unlikely(!priv->port_up))
                 goto tx_drop;
@@ -458,7 +449,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
         ring = priv->tx_ring[ring_index];
 
-        clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
+        tail_ptr = READ_ONCE(ring->tail_ptr);
 
         // prefetch for BQL
         netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -544,7 +535,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         if (unlikely(stop_queue)) {
                 smp_rmb();
 
-                clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
+                tail_ptr = READ_ONCE(ring->tail_ptr);
 
                 if (unlikely(!mqnic_is_tx_ring_full(ring)))
                         netif_tx_wake_queue(ring->tx_queue);