Clean up naming in testbenches, driver, and utils
Signed-off-by: Alex Forencich <alex@alexforencich.com>
This commit is contained in:
parent 01680f2ff5
commit 66f5b9fcc1
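The patch shortens the queue naming in the Python model and testbenches to match the hardware register documentation: EqRing/CqRing/TxRing/RxRing become Eq/Cq/Txq/Rxq, and the event_queue/tx_queue/tx_cpl_queue/rx_queue/rx_cpl_queue attribute and constant prefixes become eq/txq/tx_cq/rxq/rx_cq. A minimal summary of the mapping, collected from the hunks below (the dictionary itself is illustrative and not part of the patch):

# Illustrative old -> new mapping applied by this commit (not part of the patch)
RENAMES = {
    "EqRing": "Eq", "CqRing": "Cq", "TxRing": "Txq", "RxRing": "Rxq",
    "event_queues": "eq", "tx_queues": "txq", "tx_cpl_queues": "tx_cq",
    "rx_queues": "rxq", "rx_cpl_queues": "rx_cq",
    "tx_queue_count": "txq_count", "rx_queue_count": "rxq_count",
    "MQNIC_EVENT_QUEUE_*": "MQNIC_EQ_*", "MQNIC_CPL_QUEUE_*": "MQNIC_CQ_*",
    "MQNIC_RB_EVENT_QM_*": "MQNIC_RB_EQM_*",
    "MQNIC_MAX_EVENT_RINGS": "MQNIC_MAX_EQ", "MQNIC_MAX_TX_RINGS": "MQNIC_MAX_TXQ",
}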
@@ -468,7 +468,7 @@ async def run_test_nic(dut):
     tb.log.info("Enable queues")
     for interface in tb.driver.interfaces:
         await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
-        for k in range(interface.tx_queue_count):
+        for k in range(interface.txq_count):
             await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

     # wait for all writes to complete
@@ -661,7 +661,7 @@ async def run_test_nic(dut):
     for block in tb.driver.interfaces[0].sched_blocks:
         await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
         await tb.driver.interfaces[0].set_rx_queue_map_indir_table(block.index, 0, block.index)
-        for k in range(block.interface.tx_queue_count):
+        for k in range(block.interface.txq_count):
             if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
                 await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
             else:
@@ -468,7 +468,7 @@ async def run_test_nic(dut):
     tb.log.info("Enable queues")
     for interface in tb.driver.interfaces:
         await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
-        for k in range(interface.tx_queue_count):
+        for k in range(interface.txq_count):
             await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

     # wait for all writes to complete
@@ -661,7 +661,7 @@ async def run_test_nic(dut):
     for block in tb.driver.interfaces[0].sched_blocks:
         await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
         await tb.driver.interfaces[0].set_rx_queue_map_indir_table(block.index, 0, block.index)
-        for k in range(block.interface.tx_queue_count):
+        for k in range(block.interface.txq_count):
             if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
                 await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
             else:
@@ -43,11 +43,11 @@ from cocotbext.axi import Window

 import struct

-MQNIC_MAX_EVENT_RINGS = 1
-MQNIC_MAX_TX_RINGS = 32
-MQNIC_MAX_TX_CPL_RINGS = 32
-MQNIC_MAX_RX_RINGS = 8
-MQNIC_MAX_RX_CPL_RINGS = 8
+MQNIC_MAX_EQ = 1
+MQNIC_MAX_TXQ = 32
+MQNIC_MAX_TX_CQ = MQNIC_MAX_TXQ
+MQNIC_MAX_RXQ = 8
+MQNIC_MAX_RX_CQ = MQNIC_MAX_RXQ

 # Register blocks
 MQNIC_RB_REG_TYPE = 0x00
@@ -188,11 +188,11 @@ MQNIC_RB_RX_QUEUE_MAP_CH_REG_OFFSET = 0x00
 MQNIC_RB_RX_QUEUE_MAP_CH_REG_RSS_MASK = 0x04
 MQNIC_RB_RX_QUEUE_MAP_CH_REG_APP_MASK = 0x08

-MQNIC_RB_EVENT_QM_TYPE = 0x0000C010
-MQNIC_RB_EVENT_QM_VER = 0x00000200
-MQNIC_RB_EVENT_QM_REG_OFFSET = 0x0C
-MQNIC_RB_EVENT_QM_REG_COUNT = 0x10
-MQNIC_RB_EVENT_QM_REG_STRIDE = 0x14
+MQNIC_RB_EQM_TYPE = 0x0000C010
+MQNIC_RB_EQM_VER = 0x00000200
+MQNIC_RB_EQM_REG_OFFSET = 0x0C
+MQNIC_RB_EQM_REG_COUNT = 0x10
+MQNIC_RB_EQM_REG_STRIDE = 0x14

 MQNIC_RB_TX_QM_TYPE = 0x0000C020
 MQNIC_RB_TX_QM_VER = 0x00000200
@@ -282,27 +282,27 @@ MQNIC_QUEUE_TAIL_PTR_REG = 0x18

 MQNIC_QUEUE_ACTIVE_MASK = 0x80000000

-MQNIC_CPL_QUEUE_BASE_ADDR_REG = 0x00
-MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG = 0x08
-MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG = 0x0C
-MQNIC_CPL_QUEUE_HEAD_PTR_REG = 0x10
-MQNIC_CPL_QUEUE_TAIL_PTR_REG = 0x18
+MQNIC_CQ_BASE_ADDR_REG = 0x00
+MQNIC_CQ_ACTIVE_LOG_SIZE_REG = 0x08
+MQNIC_CQ_INTERRUPT_INDEX_REG = 0x0C
+MQNIC_CQ_HEAD_PTR_REG = 0x10
+MQNIC_CQ_TAIL_PTR_REG = 0x18

-MQNIC_CPL_QUEUE_ACTIVE_MASK = 0x80000000
+MQNIC_CQ_ACTIVE_MASK = 0x80000000

-MQNIC_CPL_QUEUE_ARM_MASK = 0x80000000
-MQNIC_CPL_QUEUE_CONT_MASK = 0x40000000
+MQNIC_CQ_ARM_MASK = 0x80000000
+MQNIC_CQ_CONT_MASK = 0x40000000

-MQNIC_EVENT_QUEUE_BASE_ADDR_REG = 0x00
-MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG = 0x08
-MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG = 0x0C
-MQNIC_EVENT_QUEUE_HEAD_PTR_REG = 0x10
-MQNIC_EVENT_QUEUE_TAIL_PTR_REG = 0x18
+MQNIC_EQ_BASE_ADDR_REG = 0x00
+MQNIC_EQ_ACTIVE_LOG_SIZE_REG = 0x08
+MQNIC_EQ_INTERRUPT_INDEX_REG = 0x0C
+MQNIC_EQ_HEAD_PTR_REG = 0x10
+MQNIC_EQ_TAIL_PTR_REG = 0x18

-MQNIC_EVENT_QUEUE_ACTIVE_MASK = 0x80000000
+MQNIC_EQ_ACTIVE_MASK = 0x80000000

-MQNIC_EVENT_QUEUE_ARM_MASK = 0x80000000
-MQNIC_EVENT_QUEUE_CONT_MASK = 0x40000000
+MQNIC_EQ_ARM_MASK = 0x80000000
+MQNIC_EQ_CONT_MASK = 0x40000000

 MQNIC_EVENT_TYPE_TX_CPL = 0x0000
 MQNIC_EVENT_TYPE_RX_CPL = 0x0001
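The *_ACTIVE_LOG_SIZE registers pack the enable bit together with the log2 ring size (hence the "# active, log size" comments in the hunks below), and the descriptor queue variant also carries the log2 descriptor block size shifted up by 8. A sketch of how the driver composes these values, inferred from the activate paths below (helper names are hypothetical):

def cq_active_log_size(log_size, active=True):
    # bit 31 = active (MQNIC_CQ_ACTIVE_MASK), low bits = log2 of ring size
    return (MQNIC_CQ_ACTIVE_MASK if active else 0) | log_size

def queue_active_log_size(log_queue_size, log_desc_block_size, active=True):
    # bit 31 = active, log2 desc block size shifted up by 8,
    # log2 queue size in the low bits
    return (MQNIC_QUEUE_ACTIVE_MASK if active else 0) | (log_desc_block_size << 8) | log_queue_size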
@@ -383,8 +383,8 @@ class Packet:
         return bytes(self.data)


-class EqRing:
-    def __init__(self, interface, index, hw_regs):
+class Eq:
+    def __init__(self, interface, eqn, hw_regs):
         self.interface = interface
         self.log = interface.log
         self.driver = interface.driver
@@ -392,7 +392,7 @@ class EqRing:
         self.size = 0
         self.size_mask = 0
         self.stride = 0
-        self.index = index
+        self.eqn = eqn
         self.active = False

         self.buf_size = 0
@@ -409,9 +409,9 @@ class EqRing:
         self.hw_regs = hw_regs

     async def init(self):
-        self.log.info("Init EqRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Init EQ %d (interface %d)", self.eqn, self.interface.index)

-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
+        await self.hw_regs.write_dword(MQNIC_EQ_ACTIVE_LOG_SIZE_REG, 0) # active, log size

     async def alloc(self, size, stride):
         if self.active:
@@ -432,13 +432,13 @@ class EqRing:
         self.head_ptr = 0
         self.tail_ptr = 0

-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG, 0) # interrupt index
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size
+        await self.hw_regs.write_dword(MQNIC_EQ_ACTIVE_LOG_SIZE_REG, 0) # active, log size
+        await self.hw_regs.write_dword(MQNIC_EQ_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
+        await self.hw_regs.write_dword(MQNIC_EQ_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
+        await self.hw_regs.write_dword(MQNIC_EQ_INTERRUPT_INDEX_REG, 0) # interrupt index
+        await self.hw_regs.write_dword(MQNIC_EQ_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
+        await self.hw_regs.write_dword(MQNIC_EQ_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
+        await self.hw_regs.write_dword(MQNIC_EQ_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size

     async def free(self):
         await self.deactivate()
@@ -448,7 +448,7 @@ class EqRing:
         pass

     async def activate(self, irq):
-        self.log.info("Activate EqRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Activate Eq %d (interface %d)", self.eqn, self.interface.index)

         await self.deactivate()

@@ -459,42 +459,42 @@ class EqRing:

         self.buf[0:self.buf_size] = b'\x00'*self.buf_size

-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG, irq) # interrupt index
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_size | MQNIC_EVENT_QUEUE_ACTIVE_MASK) # active, log size
+        await self.hw_regs.write_dword(MQNIC_EQ_ACTIVE_LOG_SIZE_REG, 0) # active, log size
+        await self.hw_regs.write_dword(MQNIC_EQ_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
+        await self.hw_regs.write_dword(MQNIC_EQ_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
+        await self.hw_regs.write_dword(MQNIC_EQ_INTERRUPT_INDEX_REG, irq) # interrupt index
+        await self.hw_regs.write_dword(MQNIC_EQ_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
+        await self.hw_regs.write_dword(MQNIC_EQ_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
+        await self.hw_regs.write_dword(MQNIC_EQ_ACTIVE_LOG_SIZE_REG, self.log_size | MQNIC_EQ_ACTIVE_MASK) # active, log size

         self.active = True

     async def deactivate(self):
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG, 0) # interrupt index
+        await self.hw_regs.write_dword(MQNIC_EQ_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size
+        await self.hw_regs.write_dword(MQNIC_EQ_INTERRUPT_INDEX_REG, 0) # interrupt index

         self.irq = None

         self.active = False

     async def read_head_ptr(self):
-        val = await self.hw_regs.read_dword(MQNIC_EVENT_QUEUE_HEAD_PTR_REG)
+        val = await self.hw_regs.read_dword(MQNIC_EQ_HEAD_PTR_REG)
         self.head_ptr += (val - self.head_ptr) & self.hw_ptr_mask

     async def write_tail_ptr(self):
-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask)
+        await self.hw_regs.write_dword(MQNIC_EQ_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask)

     async def arm(self):
         if not self.active:
             return

-        await self.hw_regs.write_dword(MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG, self.irq | MQNIC_EVENT_QUEUE_ARM_MASK) # interrupt index
+        await self.hw_regs.write_dword(MQNIC_EQ_INTERRUPT_INDEX_REG, self.irq | MQNIC_EQ_ARM_MASK) # interrupt index

     async def process_eq(self):
         if not self.interface.port_up:
             return

-        self.log.info("Process event queue")
+        self.log.info("Process EQ")

         eq_tail_ptr = self.tail_ptr
         eq_index = eq_tail_ptr & self.size_mask
@@ -502,20 +502,20 @@ class EqRing:
         while True:
             event_data = struct.unpack_from("<HHLLLLLLL", self.buf, eq_index*self.stride)

-            self.log.info("EQ %d index %d data: %s", self.index, eq_index, repr(event_data))
+            self.log.info("EQ %d index %d data: %s", self.eqn, eq_index, repr(event_data))

             if bool(event_data[-1] & 0x80000000) == bool(eq_tail_ptr & self.size):
-                self.log.info("EQ %d empty", self.index)
+                self.log.info("EQ %d empty", self.eqn)
                 break

             if event_data[0] == 0:
                 # transmit completion
-                cq = self.interface.tx_cpl_queues[event_data[1]]
+                cq = self.interface.tx_cq[event_data[1]]
                 await cq.handler(cq)
                 await cq.arm()
             elif event_data[0] == 1:
                 # receive completion
-                cq = self.interface.rx_cpl_queues[event_data[1]]
+                cq = self.interface.rx_cq[event_data[1]]
                 await cq.handler(cq)
                 await cq.arm()

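The empty check in process_eq above relies on a phase bit: bit 31 of the last word of each entry is compared against the wrap state of the software tail pointer (tail_ptr & size toggles each time the ring wraps), and entries are consumed only while the two disagree. One reading of that check as a standalone predicate (helper name hypothetical):

def eq_entry_valid(event_data, tail_ptr, size):
    # Producer phase is bit 31 of the last word of the entry; consumer
    # phase is the wrap bit of the monotonically increasing tail pointer.
    # Matching phases mean the slot has not been rewritten on this pass,
    # i.e. the queue is empty from the consumer's point of view.
    return bool(event_data[-1] & 0x80000000) != bool(tail_ptr & size)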
@@ -526,8 +526,8 @@ class EqRing:
         await self.write_tail_ptr()


-class CqRing:
-    def __init__(self, interface, index, hw_regs):
+class Cq:
+    def __init__(self, interface, cqn, hw_regs):
         self.interface = interface
         self.log = interface.log
         self.driver = interface.driver
@@ -535,7 +535,7 @@ class CqRing:
         self.size = 0
         self.size_mask = 0
         self.stride = 0
-        self.index = index
+        self.cqn = cqn
         self.active = False

         self.buf_size = 0
@@ -555,9 +555,9 @@ class CqRing:
         self.hw_regs = hw_regs

     async def init(self):
-        self.log.info("Init CqRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Init CQ %d (interface %d)", self.cqn, self.interface.index)

-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
+        await self.hw_regs.write_dword(MQNIC_CQ_ACTIVE_LOG_SIZE_REG, 0) # active, log size

     async def alloc(self, size, stride):
         if self.active:
@@ -578,13 +578,13 @@ class CqRing:
         self.head_ptr = 0
         self.tail_ptr = 0

-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG, 0) # event index
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size
+        await self.hw_regs.write_dword(MQNIC_CQ_ACTIVE_LOG_SIZE_REG, 0) # active, log size
+        await self.hw_regs.write_dword(MQNIC_CQ_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
+        await self.hw_regs.write_dword(MQNIC_CQ_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
+        await self.hw_regs.write_dword(MQNIC_CQ_INTERRUPT_INDEX_REG, 0) # event index
+        await self.hw_regs.write_dword(MQNIC_CQ_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
+        await self.hw_regs.write_dword(MQNIC_CQ_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
+        await self.hw_regs.write_dword(MQNIC_CQ_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size

     async def free(self):
         await self.deactivate()
@@ -594,7 +594,7 @@ class CqRing:
         pass

     async def activate(self, eq):
-        self.log.info("Activate CqRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Activate CQ %d (interface %d)", self.cqn, self.interface.index)

         await self.deactivate()

@@ -605,39 +605,39 @@ class CqRing:

         self.buf[0:self.buf_size] = b'\x00'*self.buf_size

-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG, eq.index) # event index
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_size | MQNIC_CPL_QUEUE_ACTIVE_MASK) # active, log size
+        await self.hw_regs.write_dword(MQNIC_CQ_ACTIVE_LOG_SIZE_REG, 0) # active, log size
+        await self.hw_regs.write_dword(MQNIC_CQ_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
+        await self.hw_regs.write_dword(MQNIC_CQ_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
+        await self.hw_regs.write_dword(MQNIC_CQ_INTERRUPT_INDEX_REG, eq.eqn) # event index
+        await self.hw_regs.write_dword(MQNIC_CQ_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
+        await self.hw_regs.write_dword(MQNIC_CQ_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
+        await self.hw_regs.write_dword(MQNIC_CQ_ACTIVE_LOG_SIZE_REG, self.log_size | MQNIC_CQ_ACTIVE_MASK) # active, log size

         self.active = True

     async def deactivate(self):
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG, 0) # event index
+        await self.hw_regs.write_dword(MQNIC_CQ_ACTIVE_LOG_SIZE_REG, self.log_size) # active, log size
+        await self.hw_regs.write_dword(MQNIC_CQ_INTERRUPT_INDEX_REG, 0) # event index

         self.eq = None

         self.active = False

     async def read_head_ptr(self):
-        val = await self.hw_regs.read_dword(MQNIC_CPL_QUEUE_HEAD_PTR_REG)
+        val = await self.hw_regs.read_dword(MQNIC_CQ_HEAD_PTR_REG)
         self.head_ptr += (val - self.head_ptr) & self.hw_ptr_mask

     async def write_tail_ptr(self):
-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask)
+        await self.hw_regs.write_dword(MQNIC_CQ_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask)

     async def arm(self):
         if not self.active:
             return

-        await self.hw_regs.write_dword(MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG, self.eq.index | MQNIC_CPL_QUEUE_ARM_MASK) # event index
+        await self.hw_regs.write_dword(MQNIC_CQ_INTERRUPT_INDEX_REG, self.eq.eqn | MQNIC_CQ_ARM_MASK) # event index


-class TxRing:
+class Txq:
     def __init__(self, interface, index, hw_regs):
         self.interface = interface
         self.log = interface.log
@@ -671,7 +671,7 @@ class TxRing:
         self.hw_regs = hw_regs

     async def init(self):
-        self.log.info("Init TxRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Init TXQ %d (interface %d)", self.index, self.interface.index)

         await self.hw_regs.write_dword(MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size

@@ -715,13 +715,13 @@ class TxRing:
         pass

     async def activate(self, cq):
-        self.log.info("Activate TxRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Activate TXQ %d (interface %d)", self.index, self.interface.index)

         await self.deactivate()

         self.cq = cq
         self.cq.src_ring = self
-        self.cq.handler = TxRing.process_tx_cq
+        self.cq.handler = Txq.process_tx_cq

         self.head_ptr = 0
         self.tail_ptr = 0
@@ -729,7 +729,7 @@ class TxRing:
         await self.hw_regs.write_dword(MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
         await self.hw_regs.write_dword(MQNIC_QUEUE_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
         await self.hw_regs.write_dword(MQNIC_QUEUE_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
-        await self.hw_regs.write_dword(MQNIC_QUEUE_CPL_QUEUE_INDEX_REG, cq.index) # completion queue index
+        await self.hw_regs.write_dword(MQNIC_QUEUE_CPL_QUEUE_INDEX_REG, cq.cqn) # completion queue index
         await self.hw_regs.write_dword(MQNIC_QUEUE_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
         await self.hw_regs.write_dword(MQNIC_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
         await self.hw_regs.write_dword(MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_queue_size | (self.log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK) # active, log desc block size, log queue size
@@ -775,7 +775,7 @@ class TxRing:
     async def process_tx_cq(cq):
         interface = cq.interface

-        interface.log.info("Process TX CQ %d for TX queue %d (interface %d)", cq.index, cq.src_ring.index, interface.index)
+        interface.log.info("Process TX CQ %d for TXQ %d (interface %d)", cq.cqn, cq.src_ring.index, interface.index)

         ring = cq.src_ring

@@ -790,10 +790,10 @@ class TxRing:
             cpl_data = struct.unpack_from("<HHHxxLHHLBBHLL", cq.buf, cq_index*cq.stride)
             ring_index = cpl_data[1] & ring.size_mask

-            interface.log.info("CQ %d index %d data: %s", cq.index, cq_index, repr(cpl_data))
+            interface.log.info("CQ %d index %d data: %s", cq.cqn, cq_index, repr(cpl_data))

             if bool(cpl_data[-1] & 0x80000000) == bool(cq_tail_ptr & cq.size):
-                interface.log.info("CQ %d empty", cq.index)
+                interface.log.info("CQ %d empty", cq.cqn)
                 break

             interface.log.info("Ring index: %d", ring_index)
@@ -822,7 +822,7 @@ class TxRing:
         ring.clean_event.set()


-class RxRing:
+class Rxq:
     def __init__(self, interface, index, hw_regs):
         self.interface = interface
         self.log = interface.log
@@ -854,7 +854,7 @@ class RxRing:
         self.hw_regs = hw_regs

     async def init(self):
-        self.log.info("Init RxRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Init RXQ %d (interface %d)", self.index, self.interface.index)

         await self.hw_regs.write_dword(MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size

@@ -898,13 +898,13 @@ class RxRing:
         pass

     async def activate(self, cq):
-        self.log.info("Activate RxRing %d (interface %d)", self.index, self.interface.index)
+        self.log.info("Activate RXQ %d (interface %d)", self.index, self.interface.index)

         await self.deactivate()

         self.cq = cq
         self.cq.src_ring = self
-        self.cq.handler = RxRing.process_rx_cq
+        self.cq.handler = Rxq.process_rx_cq

         self.head_ptr = 0
         self.tail_ptr = 0
@@ -912,7 +912,7 @@ class RxRing:
         await self.hw_regs.write_dword(MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG, 0) # active, log size
         await self.hw_regs.write_dword(MQNIC_QUEUE_BASE_ADDR_REG, self.buf_dma & 0xffffffff) # base address
         await self.hw_regs.write_dword(MQNIC_QUEUE_BASE_ADDR_REG+4, self.buf_dma >> 32) # base address
-        await self.hw_regs.write_dword(MQNIC_QUEUE_CPL_QUEUE_INDEX_REG, cq.index) # completion queue index
+        await self.hw_regs.write_dword(MQNIC_QUEUE_CPL_QUEUE_INDEX_REG, cq.cqn) # completion queue index
         await self.hw_regs.write_dword(MQNIC_QUEUE_HEAD_PTR_REG, self.head_ptr & self.hw_ptr_mask) # head pointer
         await self.hw_regs.write_dword(MQNIC_QUEUE_TAIL_PTR_REG, self.tail_ptr & self.hw_ptr_mask) # tail pointer
         await self.hw_regs.write_dword(MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG, self.log_queue_size | (self.log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK) # active, log desc block size, log queue size
@@ -986,7 +986,7 @@ class RxRing:
     async def process_rx_cq(cq):
         interface = cq.interface

-        interface.log.info("Process RX CQ %d for RX queue %d (interface %d)", cq.index, cq.src_ring.index, interface.index)
+        interface.log.info("Process RX CQ %d for RXQ %d (interface %d)", cq.cqn, cq.src_ring.index, interface.index)

         ring = cq.src_ring

@@ -1001,10 +1001,10 @@ class RxRing:
             cpl_data = struct.unpack_from("<HHHxxLHHLBBHLL", cq.buf, cq_index*cq.stride)
             ring_index = cpl_data[1] & ring.size_mask

-            interface.log.info("CQ %d index %d data: %s", cq.index, cq_index, repr(cpl_data))
+            interface.log.info("CQ %d index %d data: %s", cq.cqn, cq_index, repr(cpl_data))

             if bool(cpl_data[-1] & 0x80000000) == bool(cq_tail_ptr & cq.size):
-                interface.log.info("CQ %d empty", cq.index)
+                interface.log.info("CQ %d empty", cq.cqn)
                 break

             interface.log.info("Ring index: %d", ring_index)
@@ -1164,11 +1164,11 @@ class Interface:

         self.reg_blocks = RegBlockList()
         self.if_ctrl_rb = None
-        self.event_queue_rb = None
-        self.tx_queue_rb = None
-        self.tx_cpl_queue_rb = None
-        self.rx_queue_rb = None
-        self.rx_cpl_queue_rb = None
+        self.eq_rb = None
+        self.txq_rb = None
+        self.tx_cq_rb = None
+        self.rxq_rb = None
+        self.rx_cq_rb = None
         self.rx_queue_map_rb = None

         self.if_features = None
@@ -1176,21 +1176,21 @@ class Interface:
         self.max_tx_mtu = 0
         self.max_rx_mtu = 0

-        self.event_queue_offset = None
-        self.event_queue_count = None
-        self.event_queue_stride = None
-        self.tx_queue_offset = None
-        self.tx_queue_count = None
-        self.tx_queue_stride = None
-        self.tx_cpl_queue_offset = None
-        self.tx_cpl_queue_count = None
-        self.tx_cpl_queue_stride = None
-        self.rx_queue_offset = None
-        self.rx_queue_count = None
-        self.rx_queue_stride = None
-        self.rx_cpl_queue_offset = None
-        self.rx_cpl_queue_count = None
-        self.rx_cpl_queue_stride = None
+        self.eq_offset = None
+        self.eq_count = None
+        self.eq_stride = None
+        self.txq_offset = None
+        self.txq_count = None
+        self.txq_stride = None
+        self.tx_cq_offset = None
+        self.tx_cq_count = None
+        self.tx_cq_stride = None
+        self.rxq_offset = None
+        self.rxq_count = None
+        self.rxq_stride = None
+        self.rx_cq_offset = None
+        self.rx_cq_count = None
+        self.rx_cq_stride = None

         self.port_count = None
         self.sched_block_count = None
@@ -1198,12 +1198,12 @@ class Interface:
         self.rx_queue_map_indir_table_size = None
         self.rx_queue_map_indir_table = []

-        self.event_queues = []
+        self.eq = []

-        self.tx_queues = []
-        self.tx_cpl_queues = []
-        self.rx_queues = []
-        self.rx_cpl_queues = []
+        self.txq = []
+        self.tx_cq = []
+        self.rxq = []
+        self.rx_cq = []
         self.ports = []
         self.sched_blocks = []

@@ -1235,65 +1235,65 @@ class Interface:

         await self.set_mtu(min(self.max_tx_mtu, self.max_rx_mtu, 9214))

-        self.event_queue_rb = self.reg_blocks.find(MQNIC_RB_EVENT_QM_TYPE, MQNIC_RB_EVENT_QM_VER)
+        self.eq_rb = self.reg_blocks.find(MQNIC_RB_EQM_TYPE, MQNIC_RB_EQM_VER)

-        self.event_queue_offset = await self.event_queue_rb.read_dword(MQNIC_RB_EVENT_QM_REG_OFFSET)
-        self.event_queue_count = await self.event_queue_rb.read_dword(MQNIC_RB_EVENT_QM_REG_COUNT)
-        self.event_queue_stride = await self.event_queue_rb.read_dword(MQNIC_RB_EVENT_QM_REG_STRIDE)
+        self.eq_offset = await self.eq_rb.read_dword(MQNIC_RB_EQM_REG_OFFSET)
+        self.eq_count = await self.eq_rb.read_dword(MQNIC_RB_EQM_REG_COUNT)
+        self.eq_stride = await self.eq_rb.read_dword(MQNIC_RB_EQM_REG_STRIDE)

-        self.log.info("Event queue offset: 0x%08x", self.event_queue_offset)
-        self.log.info("Event queue count: %d", self.event_queue_count)
-        self.log.info("Event queue stride: 0x%08x", self.event_queue_stride)
+        self.log.info("EQ offset: 0x%08x", self.eq_offset)
+        self.log.info("EQ count: %d", self.eq_count)
+        self.log.info("EQ stride: 0x%08x", self.eq_stride)

-        self.event_queue_count = min(self.event_queue_count, MQNIC_MAX_EVENT_RINGS)
+        self.eq_count = min(self.eq_count, MQNIC_MAX_EQ)

-        self.tx_queue_rb = self.reg_blocks.find(MQNIC_RB_TX_QM_TYPE, MQNIC_RB_TX_QM_VER)
+        self.txq_rb = self.reg_blocks.find(MQNIC_RB_TX_QM_TYPE, MQNIC_RB_TX_QM_VER)

-        self.tx_queue_offset = await self.tx_queue_rb.read_dword(MQNIC_RB_TX_QM_REG_OFFSET)
-        self.tx_queue_count = await self.tx_queue_rb.read_dword(MQNIC_RB_TX_QM_REG_COUNT)
-        self.tx_queue_stride = await self.tx_queue_rb.read_dword(MQNIC_RB_TX_QM_REG_STRIDE)
+        self.txq_offset = await self.txq_rb.read_dword(MQNIC_RB_TX_QM_REG_OFFSET)
+        self.txq_count = await self.txq_rb.read_dword(MQNIC_RB_TX_QM_REG_COUNT)
+        self.txq_stride = await self.txq_rb.read_dword(MQNIC_RB_TX_QM_REG_STRIDE)

-        self.log.info("TX queue offset: 0x%08x", self.tx_queue_offset)
-        self.log.info("TX queue count: %d", self.tx_queue_count)
-        self.log.info("TX queue stride: 0x%08x", self.tx_queue_stride)
+        self.log.info("TXQ offset: 0x%08x", self.txq_offset)
+        self.log.info("TXQ count: %d", self.txq_count)
+        self.log.info("TXQ stride: 0x%08x", self.txq_stride)

-        self.tx_queue_count = min(self.tx_queue_count, MQNIC_MAX_TX_RINGS)
+        self.txq_count = min(self.txq_count, MQNIC_MAX_TXQ)

-        self.tx_cpl_queue_rb = self.reg_blocks.find(MQNIC_RB_TX_CQM_TYPE, MQNIC_RB_TX_CQM_VER)
+        self.tx_cq_rb = self.reg_blocks.find(MQNIC_RB_TX_CQM_TYPE, MQNIC_RB_TX_CQM_VER)

-        self.tx_cpl_queue_offset = await self.tx_cpl_queue_rb.read_dword(MQNIC_RB_TX_CQM_REG_OFFSET)
-        self.tx_cpl_queue_count = await self.tx_cpl_queue_rb.read_dword(MQNIC_RB_TX_CQM_REG_COUNT)
-        self.tx_cpl_queue_stride = await self.tx_cpl_queue_rb.read_dword(MQNIC_RB_TX_CQM_REG_STRIDE)
+        self.tx_cq_offset = await self.tx_cq_rb.read_dword(MQNIC_RB_TX_CQM_REG_OFFSET)
+        self.tx_cq_count = await self.tx_cq_rb.read_dword(MQNIC_RB_TX_CQM_REG_COUNT)
+        self.tx_cq_stride = await self.tx_cq_rb.read_dword(MQNIC_RB_TX_CQM_REG_STRIDE)

-        self.log.info("TX completion queue offset: 0x%08x", self.tx_cpl_queue_offset)
-        self.log.info("TX completion queue count: %d", self.tx_cpl_queue_count)
-        self.log.info("TX completion queue stride: 0x%08x", self.tx_cpl_queue_stride)
+        self.log.info("TX CQ offset: 0x%08x", self.tx_cq_offset)
+        self.log.info("TX CQ count: %d", self.tx_cq_count)
+        self.log.info("TX CQ stride: 0x%08x", self.tx_cq_stride)

-        self.tx_cpl_queue_count = min(self.tx_cpl_queue_count, MQNIC_MAX_TX_CPL_RINGS)
+        self.tx_cq_count = min(self.tx_cq_count, MQNIC_MAX_TX_CQ)

-        self.rx_queue_rb = self.reg_blocks.find(MQNIC_RB_RX_QM_TYPE, MQNIC_RB_RX_QM_VER)
+        self.rxq_rb = self.reg_blocks.find(MQNIC_RB_RX_QM_TYPE, MQNIC_RB_RX_QM_VER)

-        self.rx_queue_offset = await self.rx_queue_rb.read_dword(MQNIC_RB_RX_QM_REG_OFFSET)
-        self.rx_queue_count = await self.rx_queue_rb.read_dword(MQNIC_RB_RX_QM_REG_COUNT)
-        self.rx_queue_stride = await self.rx_queue_rb.read_dword(MQNIC_RB_RX_QM_REG_STRIDE)
+        self.rxq_offset = await self.rxq_rb.read_dword(MQNIC_RB_RX_QM_REG_OFFSET)
+        self.rxq_count = await self.rxq_rb.read_dword(MQNIC_RB_RX_QM_REG_COUNT)
+        self.rxq_stride = await self.rxq_rb.read_dword(MQNIC_RB_RX_QM_REG_STRIDE)

-        self.log.info("RX queue offset: 0x%08x", self.rx_queue_offset)
-        self.log.info("RX queue count: %d", self.rx_queue_count)
-        self.log.info("RX queue stride: 0x%08x", self.rx_queue_stride)
+        self.log.info("RXQ offset: 0x%08x", self.rxq_offset)
+        self.log.info("RXQ count: %d", self.rxq_count)
+        self.log.info("RXQ stride: 0x%08x", self.rxq_stride)

-        self.rx_queue_count = min(self.rx_queue_count, MQNIC_MAX_RX_RINGS)
+        self.rxq_count = min(self.rxq_count, MQNIC_MAX_RXQ)

-        self.rx_cpl_queue_rb = self.reg_blocks.find(MQNIC_RB_RX_CQM_TYPE, MQNIC_RB_RX_CQM_VER)
+        self.rx_cq_rb = self.reg_blocks.find(MQNIC_RB_RX_CQM_TYPE, MQNIC_RB_RX_CQM_VER)

-        self.rx_cpl_queue_offset = await self.rx_cpl_queue_rb.read_dword(MQNIC_RB_RX_CQM_REG_OFFSET)
-        self.rx_cpl_queue_count = await self.rx_cpl_queue_rb.read_dword(MQNIC_RB_RX_CQM_REG_COUNT)
-        self.rx_cpl_queue_stride = await self.rx_cpl_queue_rb.read_dword(MQNIC_RB_RX_CQM_REG_STRIDE)
+        self.rx_cq_offset = await self.rx_cq_rb.read_dword(MQNIC_RB_RX_CQM_REG_OFFSET)
+        self.rx_cq_count = await self.rx_cq_rb.read_dword(MQNIC_RB_RX_CQM_REG_COUNT)
+        self.rx_cq_stride = await self.rx_cq_rb.read_dword(MQNIC_RB_RX_CQM_REG_STRIDE)

-        self.log.info("RX completion queue offset: 0x%08x", self.rx_cpl_queue_offset)
-        self.log.info("RX completion queue count: %d", self.rx_cpl_queue_count)
-        self.log.info("RX completion queue stride: 0x%08x", self.rx_cpl_queue_stride)
+        self.log.info("RX CQ offset: 0x%08x", self.rx_cq_offset)
+        self.log.info("RX CQ count: %d", self.rx_cq_count)
+        self.log.info("RX CQ stride: 0x%08x", self.rx_cq_stride)

-        self.rx_cpl_queue_count = min(self.rx_cpl_queue_count, MQNIC_MAX_RX_CPL_RINGS)
+        self.rx_cq_count = min(self.rx_cq_count, MQNIC_MAX_RX_CQ)

         self.rx_queue_map_rb = self.reg_blocks.find(MQNIC_RB_RX_QUEUE_MAP_TYPE, MQNIC_RB_RX_QUEUE_MAP_VER)

@@ -1309,39 +1309,39 @@ class Interface:
             await self.set_rx_queue_map_app_mask(k, 0)
             await self.set_rx_queue_map_indir_table(k, 0, 0)

-        self.event_queues = []
+        self.eq = []

-        self.tx_queues = []
-        self.tx_cpl_queues = []
-        self.rx_queues = []
-        self.rx_cpl_queues = []
+        self.txq = []
+        self.tx_cq = []
+        self.rxq = []
+        self.rx_cq = []
         self.ports = []
         self.sched_blocks = []

-        for k in range(self.event_queue_count):
-            eq = EqRing(self, k, self.hw_regs.create_window(self.event_queue_offset + k*self.event_queue_stride, self.event_queue_stride))
+        for k in range(self.eq_count):
+            eq = Eq(self, k, self.hw_regs.create_window(self.eq_offset + k*self.eq_stride, self.eq_stride))
             await eq.init()
-            self.event_queues.append(eq)
+            self.eq.append(eq)

-        for k in range(self.tx_queue_count):
-            txq = TxRing(self, k, self.hw_regs.create_window(self.tx_queue_offset + k*self.tx_queue_stride, self.tx_queue_stride))
+        for k in range(self.txq_count):
+            txq = Txq(self, k, self.hw_regs.create_window(self.txq_offset + k*self.txq_stride, self.txq_stride))
             await txq.init()
-            self.tx_queues.append(txq)
+            self.txq.append(txq)

-        for k in range(self.tx_cpl_queue_count):
-            cq = CqRing(self, k, self.hw_regs.create_window(self.tx_cpl_queue_offset + k*self.tx_cpl_queue_stride, self.tx_cpl_queue_stride))
+        for k in range(self.tx_cq_count):
+            cq = Cq(self, k, self.hw_regs.create_window(self.tx_cq_offset + k*self.tx_cq_stride, self.tx_cq_stride))
             await cq.init()
-            self.tx_cpl_queues.append(cq)
+            self.tx_cq.append(cq)

-        for k in range(self.rx_queue_count):
-            rxq = RxRing(self, k, self.hw_regs.create_window(self.rx_queue_offset + k*self.rx_queue_stride, self.rx_queue_stride))
+        for k in range(self.rxq_count):
+            rxq = Rxq(self, k, self.hw_regs.create_window(self.rxq_offset + k*self.rxq_stride, self.rxq_stride))
             await rxq.init()
-            self.rx_queues.append(rxq)
+            self.rxq.append(rxq)

-        for k in range(self.rx_cpl_queue_count):
-            cq = CqRing(self, k, self.hw_regs.create_window(self.rx_cpl_queue_offset + k*self.rx_cpl_queue_stride, self.rx_cpl_queue_stride))
+        for k in range(self.rx_cq_count):
+            cq = Cq(self, k, self.hw_regs.create_window(self.rx_cq_offset + k*self.rx_cq_stride, self.rx_cq_stride))
             await cq.init()
-            self.rx_cpl_queues.append(cq)
+            self.rx_cq.append(cq)

         for k in range(self.port_count):
             rb = self.reg_blocks.find(MQNIC_RB_PORT_TYPE, MQNIC_RB_PORT_VER, index=k)
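Each queue manager register block publishes an (offset, count, stride) triple, and queue k's control registers are windowed at offset + k*stride, as in the creation loops above. The same pattern as a standalone helper (hypothetical, not in the patch):

def queue_window(hw_regs, base_offset, index, stride):
    # Queue index k occupies stride bytes starting at base_offset + k*stride
    # within the interface register space.
    return hw_regs.create_window(base_offset + index*stride, stride)

# e.g. the EQ loop above is equivalent to:
#   eq = Eq(self, k, queue_window(self.hw_regs, self.eq_offset, k, self.eq_stride))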
@@ -1359,7 +1359,7 @@ class Interface:

         assert self.sched_block_count == len(self.sched_blocks)

-        for eq in self.event_queues:
+        for eq in self.eq:
             await eq.alloc(1024, MQNIC_EVENT_SIZE)
             await eq.activate(self.index) # TODO?
             await eq.arm()
@@ -1368,18 +1368,18 @@ class Interface:
         await self.hw_regs.read_dword(0)

     async def open(self):
-        for rxq in self.rx_queues:
-            cq = self.rx_cpl_queues[rxq.index]
+        for rxq in self.rxq:
+            cq = self.rx_cq[rxq.index]
             await cq.alloc(1024, MQNIC_CPL_SIZE)
-            await cq.activate(self.event_queues[cq.index % self.event_queue_count])
+            await cq.activate(self.eq[cq.cqn % self.eq_count])
             await cq.arm()
             await rxq.alloc(1024, MQNIC_DESC_SIZE*4)
             await rxq.activate(cq)

-        for txq in self.tx_queues:
-            cq = self.tx_cpl_queues[txq.index]
+        for txq in self.txq:
+            cq = self.tx_cq[txq.index]
             await cq.alloc(1024, MQNIC_CPL_SIZE)
-            await cq.activate(self.event_queues[cq.index % self.event_queue_count])
+            await cq.activate(self.eq[cq.cqn % self.eq_count])
             await cq.arm()
             await txq.alloc(1024, MQNIC_DESC_SIZE*4)
             await txq.activate(cq)
@@ -1392,21 +1392,21 @@ class Interface:
     async def close(self):
         self.port_up = False

-        for txq in self.tx_queues:
+        for txq in self.txq:
             await txq.deactivate()
             await txq.cq.deactivate()

-        for rxq in self.rx_queues:
+        for rxq in self.rxq:
             await rxq.deactivate()
             await rxq.cq.deactivate()

         # wait for all writes to complete
         await self.hw_regs.read_dword(0)

-        for q in self.tx_queues:
+        for q in self.txq:
             await q.free_buf()

-        for q in self.rx_queues:
+        for q in self.rxq:
             await q.free_buf()

     async def start_xmit(self, skb, tx_ring=None, csum_start=None, csum_offset=None):
@@ -1422,7 +1422,7 @@ class Interface:
         else:
             ring_index = 0

-        ring = self.tx_queues[ring_index]
+        ring = self.txq[ring_index]

         while True:
             # check for space in ring
@@ -1597,7 +1597,7 @@ class Driver:

         await self.dev.enable_device()
         await self.dev.set_master()
-        await self.dev.alloc_irq_vectors(1, MQNIC_MAX_EVENT_RINGS)
+        await self.dev.alloc_irq_vectors(1, MQNIC_MAX_EQ)

         self.hw_regs = self.dev.bar_window[0]
         self.app_hw_regs = self.dev.bar_window[2]
@@ -1701,7 +1701,7 @@ class Driver:
     async def interrupt_handler(self, index):
        self.log.info("Interrupt handler start (IRQ %d)", index)
        for i in self.interfaces:
-            for eq in i.event_queues:
+            for eq in i.eq:
                if eq.irq == index:
                    await eq.process_eq()
                    await eq.arm()
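The testbench hunks that follow all repeat one enable pattern: turn on the round-robin scheduler through its register block, then write a per-queue control word into one dword slot per TX queue; the patch only changes the loop bound from tx_queue_count to txq_count. A hedged helper capturing the pattern (hypothetical, not in the patch; the control word is 0x00000003 in most tests and 0x00000001 in the "global enable off" variant):

async def enable_sched_queues(interface, ctrl_word=0x00000003):
    # Enable the first round-robin scheduler, then write the per-queue
    # control word for every TX queue (one 4-byte slot per queue).
    sched = interface.sched_blocks[0].schedulers[0]
    await sched.rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
    for k in range(interface.txq_count):
        await sched.hw_regs.write_dword(4*k, ctrl_word)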
@@ -248,7 +248,7 @@ async def run_test_nic(dut):
     tb.log.info("Enable queues")
     for interface in tb.driver.interfaces:
         await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
-        for k in range(interface.tx_queue_count):
+        for k in range(interface.txq_count):
             await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

     # wait for all writes to complete
@@ -383,7 +383,7 @@ async def run_test_nic(dut):
     tb.loopback_enable = True

     for k in range(len(pkts)):
-        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].tx_queue_count)
+        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].txq_count)

     for k in range(count):
         pkt = await tb.driver.interfaces[0].recv()
@@ -460,7 +460,7 @@ async def run_test_nic(dut):
     for block in tb.driver.interfaces[0].sched_blocks:
         await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
         await tb.driver.interfaces[0].set_rx_queue_map_indir_table(block.index, 0, block.index)
-        for k in range(block.interface.tx_queue_count):
+        for k in range(block.interface.txq_count):
             if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
                 await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
             else:
@@ -446,7 +446,7 @@ async def run_test_nic(dut):
     tb.log.info("Enable queues")
     for interface in tb.driver.interfaces:
         await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
-        for k in range(interface.tx_queue_count):
+        for k in range(interface.txq_count):
             await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

     # wait for all writes to complete
@@ -581,7 +581,7 @@ async def run_test_nic(dut):
     tb.loopback_enable = True

     for k in range(len(pkts)):
-        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].tx_queue_count)
+        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].txq_count)

     for k in range(count):
         pkt = await tb.driver.interfaces[0].recv()
@@ -658,7 +658,7 @@ async def run_test_nic(dut):
     for block in tb.driver.interfaces[0].sched_blocks:
         await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
         await tb.driver.interfaces[0].set_rx_queue_map_indir_table(block.index, 0, block.index)
-        for k in range(block.interface.tx_queue_count):
+        for k in range(block.interface.txq_count):
             if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
                 await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
             else:
@@ -394,7 +394,7 @@ async def run_test_nic(dut):
     tb.log.info("Enable queues")
     for interface in tb.driver.interfaces:
         await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
-        for k in range(interface.tx_queue_count):
+        for k in range(interface.txq_count):
             await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

     # wait for all writes to complete
@@ -529,7 +529,7 @@ async def run_test_nic(dut):
     tb.loopback_enable = True

     for k in range(len(pkts)):
-        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].tx_queue_count)
+        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].txq_count)

     for k in range(count):
         pkt = await tb.driver.interfaces[0].recv()
@@ -606,7 +606,7 @@ async def run_test_nic(dut):
     for block in tb.driver.interfaces[0].sched_blocks:
         await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
         await tb.driver.interfaces[0].set_rx_queue_map_indir_table(block.index, 0, block.index)
-        for k in range(block.interface.tx_queue_count):
+        for k in range(block.interface.txq_count):
             if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
                 await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
             else:
@@ -468,7 +468,7 @@ async def run_test_nic(dut):
     tb.log.info("Enable queues")
     for interface in tb.driver.interfaces:
         await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
-        for k in range(interface.tx_queue_count):
+        for k in range(interface.txq_count):
             await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

     # wait for all writes to complete
@@ -603,7 +603,7 @@ async def run_test_nic(dut):
     tb.loopback_enable = True

     for k in range(len(pkts)):
-        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].tx_queue_count)
+        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].txq_count)

     for k in range(count):
         pkt = await tb.driver.interfaces[0].recv()
@@ -680,7 +680,7 @@ async def run_test_nic(dut):
     for block in tb.driver.interfaces[0].sched_blocks:
         await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
         await tb.driver.interfaces[0].set_rx_queue_map_indir_table(block.index, 0, block.index)
-        for k in range(block.interface.tx_queue_count):
+        for k in range(block.interface.txq_count):
             if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
                 await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
             else:
@@ -468,7 +468,7 @@ async def run_test_nic(dut):
     tb.log.info("Enable queues")
     for interface in tb.driver.interfaces:
         await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
-        for k in range(interface.tx_queue_count):
+        for k in range(interface.txq_count):
             await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

     # wait for all writes to complete
@@ -603,7 +603,7 @@ async def run_test_nic(dut):
     tb.loopback_enable = True

     for k in range(len(pkts)):
-        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].tx_queue_count)
+        await tb.driver.interfaces[0].start_xmit(pkts[k], k % tb.driver.interfaces[0].txq_count)

     for k in range(count):
         pkt = await tb.driver.interfaces[0].recv()
@@ -680,7 +680,7 @@ async def run_test_nic(dut):
     for block in tb.driver.interfaces[0].sched_blocks:
         await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
         await tb.driver.interfaces[0].set_rx_queue_map_indir_table(block.index, 0, block.index)
-        for k in range(block.interface.tx_queue_count):
+        for k in range(block.interface.txq_count):
             if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
                 await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
             else:
@ -742,7 +742,7 @@ async def run_test_nic(dut):
|
||||
|
||||
# enable queues with global enable off
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000001)
|
||||
|
||||
# configure slots
|
||||
|
@ -431,7 +431,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -499,7 +499,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -431,7 +431,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -499,7 +499,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -431,7 +431,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -499,7 +499,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -431,7 +431,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -499,7 +499,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -420,7 +420,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -488,7 +488,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -386,7 +386,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -420,7 +420,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -437,7 +437,7 @@ async def run_test_nic(dut):
|
||||
# enable queues
|
||||
tb.log.info("Enable queues")
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
|
||||
for k in range(tb.driver.interfaces[0].tx_queue_count):
|
||||
for k in range(tb.driver.interfaces[0].txq_count):
|
||||
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
|
||||
|
||||
# wait for all writes to complete
|
||||
|
@ -777,7 +777,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -381,7 +381,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -551,7 +551,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -409,7 +409,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -434,7 +434,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -774,7 +774,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -475,7 +475,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -386,7 +386,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -374,7 +374,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -503,7 +503,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -403,7 +403,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -407,7 +407,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -435,7 +435,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -503,7 +503,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -429,7 +429,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -497,7 +497,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -511,7 +511,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -647,7 +647,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -204,7 +204,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -397,7 +397,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -174,7 +174,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -432,7 +432,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -501,7 +501,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -501,7 +501,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -640,7 +640,7 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
for k in range(tb.driver.interfaces[0].txq_count):
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete

@ -98,11 +98,11 @@ struct mqnic_if {

struct mqnic_reg_block *rb_list;
struct mqnic_reg_block *if_ctrl_rb;
struct mqnic_reg_block *event_queue_rb;
struct mqnic_reg_block *tx_queue_rb;
struct mqnic_reg_block *tx_cpl_queue_rb;
struct mqnic_reg_block *rx_queue_rb;
struct mqnic_reg_block *rx_cpl_queue_rb;
struct mqnic_reg_block *eq_rb;
struct mqnic_reg_block *txq_rb;
struct mqnic_reg_block *tx_cq_rb;
struct mqnic_reg_block *rxq_rb;
struct mqnic_reg_block *rx_cq_rb;
struct mqnic_reg_block *rx_queue_map_rb;

uint32_t if_features;
@ -113,22 +113,22 @@ struct mqnic_if {
uint32_t rx_queue_map_indir_table_size;
volatile uint8_t *rx_queue_map_indir_table[MQNIC_MAX_PORTS];

uint32_t event_queue_offset;
uint32_t event_queue_count;
uint32_t event_queue_stride;
uint32_t eq_offset;
uint32_t eq_count;
uint32_t eq_stride;

uint32_t tx_queue_offset;
uint32_t tx_queue_count;
uint32_t tx_queue_stride;
uint32_t tx_cpl_queue_offset;
uint32_t tx_cpl_queue_count;
uint32_t tx_cpl_queue_stride;
uint32_t rx_queue_offset;
uint32_t rx_queue_count;
uint32_t rx_queue_stride;
uint32_t rx_cpl_queue_offset;
uint32_t rx_cpl_queue_count;
uint32_t rx_cpl_queue_stride;
uint32_t txq_offset;
uint32_t txq_count;
uint32_t txq_stride;
uint32_t tx_cq_offset;
uint32_t tx_cq_count;
uint32_t tx_cq_stride;
uint32_t rxq_offset;
uint32_t rxq_count;
uint32_t rxq_stride;
uint32_t rx_cq_offset;
uint32_t rx_cq_count;
uint32_t rx_cq_stride;

uint32_t port_count;
struct mqnic_port *ports[MQNIC_MAX_PORTS];

@ -81,80 +81,80 @@ struct mqnic_if *mqnic_if_open(struct mqnic *dev, int index, volatile uint8_t *r
interface->max_tx_mtu = mqnic_reg_read32(interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU);
interface->max_rx_mtu = mqnic_reg_read32(interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU);

interface->event_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_EVENT_QM_TYPE, MQNIC_RB_EVENT_QM_VER, 0);
interface->eq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_EQM_TYPE, MQNIC_RB_EQM_VER, 0);

if (!interface->event_queue_rb)
if (!interface->eq_rb)
{
fprintf(stderr, "Error: Event queue block not found\n");
fprintf(stderr, "Error: EQ block not found\n");
goto fail;
}

interface->event_queue_offset = mqnic_reg_read32(interface->event_queue_rb->regs, MQNIC_RB_EVENT_QM_REG_OFFSET);
interface->event_queue_count = mqnic_reg_read32(interface->event_queue_rb->regs, MQNIC_RB_EVENT_QM_REG_COUNT);
interface->event_queue_stride = mqnic_reg_read32(interface->event_queue_rb->regs, MQNIC_RB_EVENT_QM_REG_STRIDE);
interface->eq_offset = mqnic_reg_read32(interface->eq_rb->regs, MQNIC_RB_EQM_REG_OFFSET);
interface->eq_count = mqnic_reg_read32(interface->eq_rb->regs, MQNIC_RB_EQM_REG_COUNT);
interface->eq_stride = mqnic_reg_read32(interface->eq_rb->regs, MQNIC_RB_EQM_REG_STRIDE);

if (interface->event_queue_count > MQNIC_MAX_EVENT_RINGS)
interface->event_queue_count = MQNIC_MAX_EVENT_RINGS;
if (interface->eq_count > MQNIC_MAX_EQ)
interface->eq_count = MQNIC_MAX_EQ;

interface->tx_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_QM_TYPE, MQNIC_RB_TX_QM_VER, 0);
interface->txq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_QM_TYPE, MQNIC_RB_TX_QM_VER, 0);

if (!interface->tx_queue_rb)
if (!interface->txq_rb)
{
fprintf(stderr, "Error: TX queue block not found\n");
fprintf(stderr, "Error: TXQ block not found\n");
goto fail;
}

interface->tx_queue_offset = mqnic_reg_read32(interface->tx_queue_rb->regs, MQNIC_RB_TX_QM_REG_OFFSET);
interface->tx_queue_count = mqnic_reg_read32(interface->tx_queue_rb->regs, MQNIC_RB_TX_QM_REG_COUNT);
interface->tx_queue_stride = mqnic_reg_read32(interface->tx_queue_rb->regs, MQNIC_RB_TX_QM_REG_STRIDE);
interface->txq_offset = mqnic_reg_read32(interface->txq_rb->regs, MQNIC_RB_TX_QM_REG_OFFSET);
interface->txq_count = mqnic_reg_read32(interface->txq_rb->regs, MQNIC_RB_TX_QM_REG_COUNT);
interface->txq_stride = mqnic_reg_read32(interface->txq_rb->regs, MQNIC_RB_TX_QM_REG_STRIDE);

if (interface->tx_queue_count > MQNIC_MAX_TX_RINGS)
interface->tx_queue_count = MQNIC_MAX_TX_RINGS;
if (interface->txq_count > MQNIC_MAX_TXQ)
interface->txq_count = MQNIC_MAX_TXQ;

interface->tx_cpl_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_CQM_TYPE, MQNIC_RB_TX_CQM_VER, 0);
interface->tx_cq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_CQM_TYPE, MQNIC_RB_TX_CQM_VER, 0);

if (!interface->tx_cpl_queue_rb)
if (!interface->tx_cq_rb)
{
fprintf(stderr, "Error: TX completion queue block not found\n");
fprintf(stderr, "Error: TX CQ block not found\n");
goto fail;
}

interface->tx_cpl_queue_offset = mqnic_reg_read32(interface->tx_cpl_queue_rb->regs, MQNIC_RB_TX_CQM_REG_OFFSET);
interface->tx_cpl_queue_count = mqnic_reg_read32(interface->tx_cpl_queue_rb->regs, MQNIC_RB_TX_CQM_REG_COUNT);
interface->tx_cpl_queue_stride = mqnic_reg_read32(interface->tx_cpl_queue_rb->regs, MQNIC_RB_TX_CQM_REG_STRIDE);
interface->tx_cq_offset = mqnic_reg_read32(interface->tx_cq_rb->regs, MQNIC_RB_TX_CQM_REG_OFFSET);
interface->tx_cq_count = mqnic_reg_read32(interface->tx_cq_rb->regs, MQNIC_RB_TX_CQM_REG_COUNT);
interface->tx_cq_stride = mqnic_reg_read32(interface->tx_cq_rb->regs, MQNIC_RB_TX_CQM_REG_STRIDE);

if (interface->tx_cpl_queue_count > MQNIC_MAX_TX_CPL_RINGS)
interface->tx_cpl_queue_count = MQNIC_MAX_TX_CPL_RINGS;
if (interface->tx_cq_count > MQNIC_MAX_TX_CQ)
interface->tx_cq_count = MQNIC_MAX_TX_CQ;

interface->rx_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_QM_TYPE, MQNIC_RB_RX_QM_VER, 0);
interface->rxq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_QM_TYPE, MQNIC_RB_RX_QM_VER, 0);

if (!interface->rx_queue_rb)
if (!interface->rxq_rb)
{
fprintf(stderr, "Error: RX queue block not found\n");
fprintf(stderr, "Error: RXQ block not found\n");
goto fail;
}

interface->rx_queue_offset = mqnic_reg_read32(interface->rx_queue_rb->regs, MQNIC_RB_RX_QM_REG_OFFSET);
interface->rx_queue_count = mqnic_reg_read32(interface->rx_queue_rb->regs, MQNIC_RB_RX_QM_REG_COUNT);
interface->rx_queue_stride = mqnic_reg_read32(interface->rx_queue_rb->regs, MQNIC_RB_RX_QM_REG_STRIDE);
interface->rxq_offset = mqnic_reg_read32(interface->rxq_rb->regs, MQNIC_RB_RX_QM_REG_OFFSET);
interface->rxq_count = mqnic_reg_read32(interface->rxq_rb->regs, MQNIC_RB_RX_QM_REG_COUNT);
interface->rxq_stride = mqnic_reg_read32(interface->rxq_rb->regs, MQNIC_RB_RX_QM_REG_STRIDE);

if (interface->rx_queue_count > MQNIC_MAX_RX_RINGS)
interface->rx_queue_count = MQNIC_MAX_RX_RINGS;
if (interface->rxq_count > MQNIC_MAX_RXQ)
interface->rxq_count = MQNIC_MAX_RXQ;

interface->rx_cpl_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_CQM_TYPE, MQNIC_RB_RX_CQM_VER, 0);
interface->rx_cq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_CQM_TYPE, MQNIC_RB_RX_CQM_VER, 0);

if (!interface->rx_cpl_queue_rb)
if (!interface->rx_cq_rb)
{
fprintf(stderr, "Error: RX completion queue block not found\n");
fprintf(stderr, "Error: RX CQ block not found\n");
goto fail;
}

interface->rx_cpl_queue_offset = mqnic_reg_read32(interface->rx_cpl_queue_rb->regs, MQNIC_RB_RX_CQM_REG_OFFSET);
interface->rx_cpl_queue_count = mqnic_reg_read32(interface->rx_cpl_queue_rb->regs, MQNIC_RB_RX_CQM_REG_COUNT);
interface->rx_cpl_queue_stride = mqnic_reg_read32(interface->rx_cpl_queue_rb->regs, MQNIC_RB_RX_CQM_REG_STRIDE);
interface->rx_cq_offset = mqnic_reg_read32(interface->rx_cq_rb->regs, MQNIC_RB_RX_CQM_REG_OFFSET);
interface->rx_cq_count = mqnic_reg_read32(interface->rx_cq_rb->regs, MQNIC_RB_RX_CQM_REG_COUNT);
interface->rx_cq_stride = mqnic_reg_read32(interface->rx_cq_rb->regs, MQNIC_RB_RX_CQM_REG_STRIDE);

if (interface->rx_cpl_queue_count > MQNIC_MAX_RX_CPL_RINGS)
interface->rx_cpl_queue_count = MQNIC_MAX_RX_CPL_RINGS;
if (interface->rx_cq_count > MQNIC_MAX_RX_CQ)
interface->rx_cq_count = MQNIC_MAX_RX_CQ;

interface->rx_queue_map_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_QUEUE_MAP_TYPE, MQNIC_RB_RX_QUEUE_MAP_VER, 0);

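The hunk above repeats one discover-and-clamp pattern per queue manager: find the register block by type and version, read its OFFSET/COUNT/STRIDE registers, and clamp the count to a compile-time maximum. A minimal sketch of a helper that could factor this out (hypothetical, not part of the commit; it assumes only the accessors shown above and that the 0x0C/0x10/0x14 register offsets are common to all queue manager block types, as the constants in this commit suggest):

/* Hypothetical helper: locate one queue manager register block and read its
 * layout registers; the TX QM offset constants stand in for the shared
 * OFFSET/COUNT/STRIDE layout used by all of the QM/CQM/EQM block types. */
static int mqnic_if_open_qm_rb(struct mqnic_if *interface, uint32_t rb_type,
        uint32_t rb_ver, uint32_t max_count, struct mqnic_reg_block **rb_out,
        uint32_t *offset, uint32_t *count, uint32_t *stride)
{
    struct mqnic_reg_block *rb = mqnic_find_reg_block(interface->rb_list, rb_type, rb_ver, 0);

    if (!rb)
        return -1;

    *offset = mqnic_reg_read32(rb->regs, MQNIC_RB_TX_QM_REG_OFFSET);
    *count = mqnic_reg_read32(rb->regs, MQNIC_RB_TX_QM_REG_COUNT);
    *stride = mqnic_reg_read32(rb->regs, MQNIC_RB_TX_QM_REG_STRIDE);

    if (*count > max_count)
        *count = max_count; /* clamp to the driver-side maximum */

    *rb_out = rb;
    return 0;
}
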
@ -70,9 +70,9 @@
// default interval to poll port TX/RX status, in ms
#define MQNIC_LINK_STATUS_POLL_MS 1000

extern unsigned int mqnic_num_ev_queue_entries;
extern unsigned int mqnic_num_tx_queue_entries;
extern unsigned int mqnic_num_rx_queue_entries;
extern unsigned int mqnic_num_eq_entries;
extern unsigned int mqnic_num_txq_entries;
extern unsigned int mqnic_num_rxq_entries;

extern unsigned int mqnic_link_status_poll;

@ -281,7 +281,7 @@ struct mqnic_ring {
struct mqnic_if *interface;
struct mqnic_priv *priv;
int index;
struct mqnic_cq_ring *cq_ring;
struct mqnic_cq *cq;
int active;

u32 hw_ptr_mask;
@ -290,7 +290,7 @@ struct mqnic_ring {
u8 __iomem *hw_tail_ptr;
} ____cacheline_aligned_in_smp;

struct mqnic_cq_ring {
struct mqnic_cq {
u32 head_ptr;

u32 tail_ptr;
@ -306,13 +306,12 @@ struct mqnic_cq_ring {
struct device *dev;
struct mqnic_if *interface;
struct napi_struct napi;
int index;
struct mqnic_eq_ring *eq_ring;
int cqn;
struct mqnic_eq *eq;
struct mqnic_ring *src_ring;
int eq_index;
int active;

void (*handler)(struct mqnic_cq_ring *ring);
void (*handler)(struct mqnic_cq *cq);

u32 hw_ptr_mask;
u8 __iomem *hw_addr;
@ -320,7 +319,7 @@ struct mqnic_cq_ring {
u8 __iomem *hw_tail_ptr;
};

struct mqnic_eq_ring {
struct mqnic_eq {
u32 head_ptr;

u32 tail_ptr;
@ -335,14 +334,13 @@ struct mqnic_eq_ring {

struct device *dev;
struct mqnic_if *interface;
int index;
int eqn;
struct mqnic_irq *irq;
int irq_index;
int active;

struct notifier_block irq_nb;

void (*handler)(struct mqnic_eq_ring *ring);
void (*handler)(struct mqnic_eq *eq);

u32 hw_ptr_mask;
u8 __iomem *hw_addr;
@ -389,7 +387,7 @@ struct mqnic_sched_block {

int index;

u32 tx_queue_count;
u32 txq_count;

u32 sched_count;
struct mqnic_sched *sched[MQNIC_MAX_PORTS];
@ -401,11 +399,11 @@ struct mqnic_if {

struct mqnic_reg_block *rb_list;
struct mqnic_reg_block *if_ctrl_rb;
struct mqnic_reg_block *event_queue_rb;
struct mqnic_reg_block *tx_queue_rb;
struct mqnic_reg_block *tx_cpl_queue_rb;
struct mqnic_reg_block *rx_queue_rb;
struct mqnic_reg_block *rx_cpl_queue_rb;
struct mqnic_reg_block *eq_rb;
struct mqnic_reg_block *txq_rb;
struct mqnic_reg_block *tx_cq_rb;
struct mqnic_reg_block *rxq_rb;
struct mqnic_reg_block *rx_cq_rb;
struct mqnic_reg_block *rx_queue_map_rb;

int index;
@ -419,30 +417,30 @@ struct mqnic_if {
u32 max_tx_mtu;
u32 max_rx_mtu;

u32 event_queue_offset;
u32 event_queue_count;
u32 event_queue_stride;
struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS];
u32 eq_offset;
u32 eq_count;
u32 eq_stride;
struct mqnic_eq *eq[MQNIC_MAX_EQ];

u32 tx_queue_offset;
u32 tx_queue_count;
u32 tx_queue_stride;
struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS];
u32 txq_offset;
u32 txq_count;
u32 txq_stride;
struct mqnic_ring *txq[MQNIC_MAX_TXQ];

u32 tx_cpl_queue_offset;
u32 tx_cpl_queue_count;
u32 tx_cpl_queue_stride;
struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS];
u32 tx_cq_offset;
u32 tx_cq_count;
u32 tx_cq_stride;
struct mqnic_cq *tx_cq[MQNIC_MAX_TX_CQ];

u32 rx_queue_offset;
u32 rx_queue_count;
u32 rx_queue_stride;
struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS];
u32 rxq_offset;
u32 rxq_count;
u32 rxq_stride;
struct mqnic_ring *rxq[MQNIC_MAX_RXQ];

u32 rx_cpl_queue_offset;
u32 rx_cpl_queue_count;
u32 rx_cpl_queue_stride;
struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];
u32 rx_cq_offset;
u32 rx_cq_count;
u32 rx_cq_stride;
struct mqnic_cq *rx_cq[MQNIC_MAX_RX_CQ];

u32 port_count;
struct mqnic_port *port[MQNIC_MAX_PORTS];
@ -482,20 +480,20 @@ struct mqnic_priv {
unsigned int link_status;
struct timer_list link_status_timer;

u32 event_queue_count;
struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS];
u32 eq_count;
struct mqnic_eq *eq[MQNIC_MAX_EQ];

u32 tx_queue_count;
struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS];
u32 txq_count;
struct mqnic_ring *txq[MQNIC_MAX_TXQ];

u32 tx_cpl_queue_count;
struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS];
u32 tx_cq_count;
struct mqnic_cq *tx_cq[MQNIC_MAX_TX_CQ];

u32 rx_queue_count;
struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS];
u32 rxq_count;
struct mqnic_ring *rxq[MQNIC_MAX_RXQ];

u32 rx_cpl_queue_count;
struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];
u32 rx_cq_count;
struct mqnic_cq *rx_cq[MQNIC_MAX_RX_CQ];

u32 sched_block_count;
struct mqnic_sched_block *sched_block[MQNIC_MAX_PORTS];
@ -598,29 +596,29 @@ void mqnic_stats_init(struct mqnic_dev *mdev);
u64 mqnic_stats_read(struct mqnic_dev *mdev, int index);

// mqnic_eq.c
int mqnic_create_eq_ring(struct mqnic_if *interface, struct mqnic_eq_ring **ring_ptr,
int index, u8 __iomem *hw_addr);
void mqnic_destroy_eq_ring(struct mqnic_eq_ring **ring_ptr);
int mqnic_alloc_eq_ring(struct mqnic_eq_ring *ring, int size, int stride);
void mqnic_free_eq_ring(struct mqnic_eq_ring *ring);
int mqnic_activate_eq_ring(struct mqnic_eq_ring *ring, struct mqnic_irq *irq);
void mqnic_deactivate_eq_ring(struct mqnic_eq_ring *ring);
void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring);
void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring);
void mqnic_arm_eq(struct mqnic_eq_ring *ring);
void mqnic_process_eq(struct mqnic_eq_ring *eq_ring);
int mqnic_create_eq(struct mqnic_if *interface, struct mqnic_eq **eq_ptr,
int eqn, u8 __iomem *hw_addr);
void mqnic_destroy_eq(struct mqnic_eq **eq_ptr);
int mqnic_alloc_eq(struct mqnic_eq *eq, int size, int stride);
void mqnic_free_eq(struct mqnic_eq *eq);
int mqnic_activate_eq(struct mqnic_eq *eq, struct mqnic_irq *irq);
void mqnic_deactivate_eq(struct mqnic_eq *eq);
void mqnic_eq_read_head_ptr(struct mqnic_eq *eq);
void mqnic_eq_write_tail_ptr(struct mqnic_eq *eq);
void mqnic_arm_eq(struct mqnic_eq *eq);
void mqnic_process_eq(struct mqnic_eq *eq);

// mqnic_cq.c
int mqnic_create_cq_ring(struct mqnic_if *interface, struct mqnic_cq_ring **ring_ptr,
int index, u8 __iomem *hw_addr);
void mqnic_destroy_cq_ring(struct mqnic_cq_ring **ring_ptr);
int mqnic_alloc_cq_ring(struct mqnic_cq_ring *ring, int size, int stride);
void mqnic_free_cq_ring(struct mqnic_cq_ring *ring);
int mqnic_activate_cq_ring(struct mqnic_cq_ring *ring, struct mqnic_eq_ring *eq_ring);
void mqnic_deactivate_cq_ring(struct mqnic_cq_ring *ring);
void mqnic_cq_read_head_ptr(struct mqnic_cq_ring *ring);
void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring);
void mqnic_arm_cq(struct mqnic_cq_ring *ring);
int mqnic_create_cq(struct mqnic_if *interface, struct mqnic_cq **cq_ptr,
int cqn, u8 __iomem *hw_addr);
void mqnic_destroy_cq(struct mqnic_cq **cq_ptr);
int mqnic_alloc_cq(struct mqnic_cq *cq, int size, int stride);
void mqnic_free_cq(struct mqnic_cq *cq);
int mqnic_activate_cq(struct mqnic_cq *cq, struct mqnic_eq *eq);
void mqnic_deactivate_cq(struct mqnic_cq *cq);
void mqnic_cq_read_head_ptr(struct mqnic_cq *cq);
void mqnic_cq_write_tail_ptr(struct mqnic_cq *cq);
void mqnic_arm_cq(struct mqnic_cq *cq);

// mqnic_tx.c
int mqnic_create_tx_ring(struct mqnic_if *interface, struct mqnic_ring **ring_ptr,
@ -629,7 +627,7 @@ void mqnic_destroy_tx_ring(struct mqnic_ring **ring_ptr);
int mqnic_alloc_tx_ring(struct mqnic_ring *ring, int size, int stride);
void mqnic_free_tx_ring(struct mqnic_ring *ring);
int mqnic_activate_tx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv,
struct mqnic_cq_ring *cq_ring);
struct mqnic_cq *cq);
void mqnic_deactivate_tx_ring(struct mqnic_ring *ring);
bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring);
bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring);
@ -637,8 +635,8 @@ void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring);
void mqnic_tx_write_head_ptr(struct mqnic_ring *ring);
void mqnic_free_tx_desc(struct mqnic_ring *ring, int index, int napi_budget);
int mqnic_free_tx_buf(struct mqnic_ring *ring);
int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget);
void mqnic_tx_irq(struct mqnic_cq_ring *cq);
int mqnic_process_tx_cq(struct mqnic_cq *cq, int napi_budget);
void mqnic_tx_irq(struct mqnic_cq *cq);
int mqnic_poll_tx_cq(struct napi_struct *napi, int budget);
netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);

@ -649,7 +647,7 @@ void mqnic_destroy_rx_ring(struct mqnic_ring **ring_ptr);
int mqnic_alloc_rx_ring(struct mqnic_ring *ring, int size, int stride);
void mqnic_free_rx_ring(struct mqnic_ring *ring);
int mqnic_activate_rx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv,
struct mqnic_cq_ring *cq_ring);
struct mqnic_cq *cq);
void mqnic_deactivate_rx_ring(struct mqnic_ring *ring);
bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring);
bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring);
@ -659,8 +657,8 @@ void mqnic_free_rx_desc(struct mqnic_ring *ring, int index);
int mqnic_free_rx_buf(struct mqnic_ring *ring);
int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index);
void mqnic_refill_rx_buffers(struct mqnic_ring *ring);
int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget);
void mqnic_rx_irq(struct mqnic_cq_ring *cq);
int mqnic_process_rx_cq(struct mqnic_cq *cq, int napi_budget);
void mqnic_rx_irq(struct mqnic_cq *cq);
int mqnic_poll_rx_cq(struct napi_struct *napi, int budget);

// mqnic_ethtool.c

@ -35,152 +35,151 @@

#include "mqnic.h"

int mqnic_create_cq_ring(struct mqnic_if *interface, struct mqnic_cq_ring **ring_ptr,
int index, u8 __iomem *hw_addr)
int mqnic_create_cq(struct mqnic_if *interface, struct mqnic_cq **cq_ptr,
int cqn, u8 __iomem *hw_addr)
{
struct mqnic_cq_ring *ring;
struct mqnic_cq *cq;

ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
return -ENOMEM;

*ring_ptr = ring;
*cq_ptr = cq;

ring->dev = interface->dev;
ring->interface = interface;
cq->dev = interface->dev;
cq->interface = interface;

ring->index = index;
ring->active = 0;
cq->cqn = cqn;
cq->active = 0;

ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG;
cq->hw_addr = hw_addr;
cq->hw_ptr_mask = 0xffff;
cq->hw_head_ptr = hw_addr + MQNIC_CQ_HEAD_PTR_REG;
cq->hw_tail_ptr = hw_addr + MQNIC_CQ_TAIL_PTR_REG;

ring->head_ptr = 0;
ring->tail_ptr = 0;
cq->head_ptr = 0;
cq->tail_ptr = 0;

// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(0, cq->hw_addr + MQNIC_CQ_ACTIVE_LOG_SIZE_REG);

return 0;
}

void mqnic_destroy_cq_ring(struct mqnic_cq_ring **ring_ptr)
void mqnic_destroy_cq(struct mqnic_cq **cq_ptr)
{
struct mqnic_cq_ring *ring = *ring_ptr;
struct mqnic_cq *cq = *cq_ptr;

mqnic_free_cq_ring(ring);
mqnic_free_cq(cq);

*ring_ptr = NULL;
kfree(ring);
*cq_ptr = NULL;
kfree(cq);
}

int mqnic_alloc_cq_ring(struct mqnic_cq_ring *ring, int size, int stride)
int mqnic_alloc_cq(struct mqnic_cq *cq, int size, int stride)
{
if (ring->active || ring->buf)
if (cq->active || cq->buf)
return -EINVAL;

ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size - 1;
ring->stride = roundup_pow_of_two(stride);
cq->size = roundup_pow_of_two(size);
cq->size_mask = cq->size - 1;
cq->stride = roundup_pow_of_two(stride);

ring->buf_size = ring->size * ring->stride;
ring->buf = dma_alloc_coherent(ring->dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
cq->buf_size = cq->size * cq->stride;
cq->buf = dma_alloc_coherent(cq->dev, cq->buf_size, &cq->buf_dma_addr, GFP_KERNEL);
if (!cq->buf)
return -ENOMEM;

ring->head_ptr = 0;
ring->tail_ptr = 0;
cq->head_ptr = 0;
cq->tail_ptr = 0;

// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(0, cq->hw_addr + MQNIC_CQ_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 4);
iowrite32(cq->buf_dma_addr, cq->hw_addr + MQNIC_CQ_BASE_ADDR_REG + 0);
iowrite32(cq->buf_dma_addr >> 32, cq->hw_addr + MQNIC_CQ_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(0, cq->hw_addr + MQNIC_CQ_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
iowrite32(cq->head_ptr & cq->hw_ptr_mask, cq->hw_addr + MQNIC_CQ_HEAD_PTR_REG);
iowrite32(cq->tail_ptr & cq->hw_ptr_mask, cq->hw_addr + MQNIC_CQ_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(ilog2(cq->size), cq->hw_addr + MQNIC_CQ_ACTIVE_LOG_SIZE_REG);

return 0;
}

void mqnic_free_cq_ring(struct mqnic_cq_ring *ring)
void mqnic_free_cq(struct mqnic_cq *cq)
{
mqnic_deactivate_cq_ring(ring);
mqnic_deactivate_cq(cq);

if (ring->buf) {
dma_free_coherent(ring->dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
ring->buf = NULL;
ring->buf_dma_addr = 0;
if (cq->buf) {
dma_free_coherent(cq->dev, cq->buf_size, cq->buf, cq->buf_dma_addr);
cq->buf = NULL;
cq->buf_dma_addr = 0;
}
}

int mqnic_activate_cq_ring(struct mqnic_cq_ring *ring, struct mqnic_eq_ring *eq_ring)
int mqnic_activate_cq(struct mqnic_cq *cq, struct mqnic_eq *eq)
{
mqnic_deactivate_cq_ring(ring);
mqnic_deactivate_cq(cq);

if (!ring->buf || !eq_ring)
if (!cq->buf || !eq)
return -EINVAL;

ring->eq_ring = eq_ring;
ring->eq_index = eq_ring->index;
cq->eq = eq;

ring->head_ptr = 0;
ring->tail_ptr = 0;
cq->head_ptr = 0;
cq->tail_ptr = 0;

memset(ring->buf, 1, ring->buf_size);
memset(cq->buf, 1, cq->buf_size);

// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(0, cq->hw_addr + MQNIC_CQ_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 4);
iowrite32(cq->buf_dma_addr, cq->hw_addr + MQNIC_CQ_BASE_ADDR_REG + 0);
iowrite32(cq->buf_dma_addr >> 32, cq->hw_addr + MQNIC_CQ_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(ring->eq_index, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(cq->eq->eqn, cq->hw_addr + MQNIC_CQ_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
iowrite32(cq->head_ptr & cq->hw_ptr_mask, cq->hw_addr + MQNIC_CQ_HEAD_PTR_REG);
iowrite32(cq->tail_ptr & cq->hw_ptr_mask, cq->hw_addr + MQNIC_CQ_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK,
ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(ilog2(cq->size) | MQNIC_CQ_ACTIVE_MASK,
cq->hw_addr + MQNIC_CQ_ACTIVE_LOG_SIZE_REG);

ring->active = 1;
cq->active = 1;

return 0;
}

void mqnic_deactivate_cq_ring(struct mqnic_cq_ring *ring)
void mqnic_deactivate_cq(struct mqnic_cq *cq)
{
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(ilog2(cq->size), cq->hw_addr + MQNIC_CQ_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(ring->eq_index, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(0, cq->hw_addr + MQNIC_CQ_INTERRUPT_INDEX_REG);

ring->eq_ring = NULL;
cq->eq = NULL;

ring->active = 0;
cq->active = 0;
}

void mqnic_cq_read_head_ptr(struct mqnic_cq_ring *ring)
void mqnic_cq_read_head_ptr(struct mqnic_cq *cq)
{
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
cq->head_ptr += (ioread32(cq->hw_head_ptr) - cq->head_ptr) & cq->hw_ptr_mask;
}

void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring)
void mqnic_cq_write_tail_ptr(struct mqnic_cq *cq)
{
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
iowrite32(cq->tail_ptr & cq->hw_ptr_mask, cq->hw_tail_ptr);
}

void mqnic_arm_cq(struct mqnic_cq_ring *ring)
void mqnic_arm_cq(struct mqnic_cq *cq)
{
if (!ring->active)
if (!cq->active)
return;

iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK,
ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(cq->eq->eqn | MQNIC_CQ_ARM_MASK,
cq->hw_addr + MQNIC_CQ_INTERRUPT_INDEX_REG);
}

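Read together, the renamed entry points above define the CQ lifecycle: create, allocate, activate against an EQ, arm, and eventually destroy. A minimal bring-up sketch under assumed context (`interface`, `eq`, and `hw_addr` valid; the entry count and stride are illustrative, and error unwinding is abbreviated):

/* Sketch: bring up one completion queue with the renamed API. */
static int example_cq_bringup(struct mqnic_if *interface, struct mqnic_eq *eq,
        u8 __iomem *hw_addr, struct mqnic_cq **cq_out)
{
    struct mqnic_cq *cq;
    int ret;

    ret = mqnic_create_cq(interface, &cq, 0, hw_addr); /* cqn 0 */
    if (ret)
        return ret;

    ret = mqnic_alloc_cq(cq, 1024, 32); /* 1024 entries, 32-byte stride (illustrative) */
    if (ret)
        goto err;

    ret = mqnic_activate_cq(cq, eq); /* bind to the EQ and program the hardware */
    if (ret)
        goto err;

    mqnic_arm_cq(cq); /* request an event for the next completion */

    *cq_out = cq;
    return 0;

err:
    mqnic_destroy_cq(&cq); /* mqnic_free_cq() releases the DMA buffer if allocated */
    return ret;
}
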
@ -37,228 +37,227 @@

static int mqnic_eq_int(struct notifier_block *nb, unsigned long action, void *data)
{
struct mqnic_eq_ring *ring = container_of(nb, struct mqnic_eq_ring, irq_nb);
struct mqnic_eq *eq = container_of(nb, struct mqnic_eq, irq_nb);

mqnic_process_eq(ring);
mqnic_arm_eq(ring);
mqnic_process_eq(eq);
mqnic_arm_eq(eq);

return NOTIFY_DONE;
}

int mqnic_create_eq_ring(struct mqnic_if *interface, struct mqnic_eq_ring **ring_ptr,
int index, u8 __iomem *hw_addr)
int mqnic_create_eq(struct mqnic_if *interface, struct mqnic_eq **eq_ptr,
int eqn, u8 __iomem *hw_addr)
{
struct mqnic_eq_ring *ring;
struct mqnic_eq *eq;

ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq)
return -ENOMEM;

*ring_ptr = ring;
*eq_ptr = eq;

ring->dev = interface->dev;
ring->interface = interface;
eq->dev = interface->dev;
eq->interface = interface;

ring->index = index;
ring->active = 0;
eq->eqn = eqn;
eq->active = 0;

ring->irq_nb.notifier_call = mqnic_eq_int;
eq->irq_nb.notifier_call = mqnic_eq_int;

ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG;
eq->hw_addr = hw_addr;
eq->hw_ptr_mask = 0xffff;
eq->hw_head_ptr = hw_addr + MQNIC_EQ_HEAD_PTR_REG;
eq->hw_tail_ptr = hw_addr + MQNIC_EQ_TAIL_PTR_REG;

ring->head_ptr = 0;
ring->tail_ptr = 0;
eq->head_ptr = 0;
eq->tail_ptr = 0;

// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(0, eq->hw_addr + MQNIC_EQ_ACTIVE_LOG_SIZE_REG);

return 0;
}

void mqnic_destroy_eq_ring(struct mqnic_eq_ring **ring_ptr)
void mqnic_destroy_eq(struct mqnic_eq **eq_ptr)
{
struct mqnic_eq_ring *ring = *ring_ptr;
struct mqnic_eq *eq = *eq_ptr;

mqnic_free_eq_ring(ring);
mqnic_free_eq(eq);

*ring_ptr = NULL;
kfree(ring);
*eq_ptr = NULL;
kfree(eq);
}

int mqnic_alloc_eq_ring(struct mqnic_eq_ring *ring, int size, int stride)
int mqnic_alloc_eq(struct mqnic_eq *eq, int size, int stride)
{
if (ring->active || ring->buf)
if (eq->active || eq->buf)
return -EINVAL;

ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size - 1;
ring->stride = roundup_pow_of_two(stride);
eq->size = roundup_pow_of_two(size);
eq->size_mask = eq->size - 1;
eq->stride = roundup_pow_of_two(stride);

ring->buf_size = ring->size * ring->stride;
ring->buf = dma_alloc_coherent(ring->dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
eq->buf_size = eq->size * eq->stride;
eq->buf = dma_alloc_coherent(eq->dev, eq->buf_size, &eq->buf_dma_addr, GFP_KERNEL);
if (!eq->buf)
return -ENOMEM;

ring->head_ptr = 0;
ring->tail_ptr = 0;
eq->head_ptr = 0;
eq->tail_ptr = 0;

// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(0, eq->hw_addr + MQNIC_EQ_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
iowrite32(eq->buf_dma_addr, eq->hw_addr + MQNIC_EQ_BASE_ADDR_REG + 0);
iowrite32(eq->buf_dma_addr >> 32, eq->hw_addr + MQNIC_EQ_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(0, eq->hw_addr + MQNIC_EQ_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
iowrite32(eq->head_ptr & eq->hw_ptr_mask, eq->hw_addr + MQNIC_EQ_HEAD_PTR_REG);
iowrite32(eq->tail_ptr & eq->hw_ptr_mask, eq->hw_addr + MQNIC_EQ_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(ilog2(eq->size), eq->hw_addr + MQNIC_EQ_ACTIVE_LOG_SIZE_REG);

return 0;
}

void mqnic_free_eq_ring(struct mqnic_eq_ring *ring)
void mqnic_free_eq(struct mqnic_eq *eq)
{
mqnic_deactivate_eq_ring(ring);
mqnic_deactivate_eq(eq);

if (ring->buf) {
dma_free_coherent(ring->dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
ring->buf = NULL;
ring->buf_dma_addr = 0;
if (eq->buf) {
dma_free_coherent(eq->dev, eq->buf_size, eq->buf, eq->buf_dma_addr);
eq->buf = NULL;
eq->buf_dma_addr = 0;
}
}

int mqnic_activate_eq_ring(struct mqnic_eq_ring *ring, struct mqnic_irq *irq)
int mqnic_activate_eq(struct mqnic_eq *eq, struct mqnic_irq *irq)
{
int ret = 0;

mqnic_deactivate_eq_ring(ring);
mqnic_deactivate_eq(eq);

if (!ring->buf || !irq)
if (!eq->buf || !irq)
return -EINVAL;

// register interrupt
ret = atomic_notifier_chain_register(&irq->nh, &ring->irq_nb);
ret = atomic_notifier_chain_register(&irq->nh, &eq->irq_nb);
if (ret)
return ret;

ring->irq = irq;
ring->irq_index = irq->index;
eq->irq = irq;

ring->head_ptr = 0;
ring->tail_ptr = 0;
eq->head_ptr = 0;
eq->tail_ptr = 0;

memset(ring->buf, 1, ring->buf_size);
memset(eq->buf, 1, eq->buf_size);

// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(0, eq->hw_addr + MQNIC_EQ_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
iowrite32(eq->buf_dma_addr, eq->hw_addr + MQNIC_EQ_BASE_ADDR_REG + 0);
iowrite32(eq->buf_dma_addr >> 32, eq->hw_addr + MQNIC_EQ_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(ring->irq_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(eq->irq->index, eq->hw_addr + MQNIC_EQ_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
iowrite32(eq->head_ptr & eq->hw_ptr_mask, eq->hw_addr + MQNIC_EQ_HEAD_PTR_REG);
iowrite32(eq->tail_ptr & eq->hw_ptr_mask, eq->hw_addr + MQNIC_EQ_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK,
ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(ilog2(eq->size) | MQNIC_EQ_ACTIVE_MASK,
eq->hw_addr + MQNIC_EQ_ACTIVE_LOG_SIZE_REG);

ring->active = 1;
eq->active = 1;

return 0;
}

void mqnic_deactivate_eq_ring(struct mqnic_eq_ring *ring)
void mqnic_deactivate_eq(struct mqnic_eq *eq)
{
int ret = 0;

// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(ilog2(eq->size), eq->hw_addr + MQNIC_EQ_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(0, eq->hw_addr + MQNIC_EQ_INTERRUPT_INDEX_REG);

// unregister interrupt
if (ring->irq)
ret = atomic_notifier_chain_unregister(&ring->irq->nh, &ring->irq_nb);
if (eq->irq)
ret = atomic_notifier_chain_unregister(&eq->irq->nh, &eq->irq_nb);

ring->irq = NULL;
eq->irq = NULL;

ring->active = 0;
eq->active = 0;
}

void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring)
void mqnic_eq_read_head_ptr(struct mqnic_eq *eq)
{
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
eq->head_ptr += (ioread32(eq->hw_head_ptr) - eq->head_ptr) & eq->hw_ptr_mask;
}

void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring)
void mqnic_eq_write_tail_ptr(struct mqnic_eq *eq)
{
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
iowrite32(eq->tail_ptr & eq->hw_ptr_mask, eq->hw_tail_ptr);
}

void mqnic_arm_eq(struct mqnic_eq_ring *ring)
void mqnic_arm_eq(struct mqnic_eq *eq)
{
if (!ring->active)
if (!eq->active)
return;

iowrite32(ring->irq_index | MQNIC_EVENT_QUEUE_ARM_MASK,
ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(eq->irq->index | MQNIC_EQ_ARM_MASK,
eq->hw_addr + MQNIC_EQ_INTERRUPT_INDEX_REG);
}

void mqnic_process_eq(struct mqnic_eq_ring *eq_ring)
void mqnic_process_eq(struct mqnic_eq *eq)
{
struct mqnic_if *interface = eq_ring->interface;
struct mqnic_if *interface = eq->interface;
struct mqnic_event *event;
struct mqnic_cq_ring *cq_ring;
struct mqnic_cq *cq;
u32 eq_index;
u32 eq_tail_ptr;
int done = 0;

// read head pointer from NIC
eq_tail_ptr = eq_ring->tail_ptr;
eq_index = eq_tail_ptr & eq_ring->size_mask;
eq_tail_ptr = eq->tail_ptr;
eq_index = eq_tail_ptr & eq->size_mask;

while (1) {
event = (struct mqnic_event *)(eq_ring->buf + eq_index * eq_ring->stride);
event = (struct mqnic_event *)(eq->buf + eq_index * eq->stride);

if (!!(event->phase & cpu_to_le32(0x80000000)) == !!(eq_tail_ptr & eq_ring->size))
if (!!(event->phase & cpu_to_le32(0x80000000)) == !!(eq_tail_ptr & eq->size))
break;

dma_rmb();

if (event->type == MQNIC_EVENT_TYPE_TX_CPL) {
// transmit completion event
if (unlikely(le16_to_cpu(event->source) > interface->tx_cpl_queue_count)) {
dev_err(eq_ring->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
if (unlikely(le16_to_cpu(event->source) > interface->tx_cq_count)) {
dev_err(eq->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
__func__, interface->index, le16_to_cpu(event->source), eq_index,
le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true);
} else {
cq_ring = interface->tx_cpl_ring[le16_to_cpu(event->source)];
if (likely(cq_ring && cq_ring->handler))
cq_ring->handler(cq_ring);
cq = interface->tx_cq[le16_to_cpu(event->source)];
if (likely(cq && cq->handler))
cq->handler(cq);
}
} else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) {
// receive completion event
if (unlikely(le16_to_cpu(event->source) > interface->rx_cpl_queue_count)) {
dev_err(eq_ring->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
if (unlikely(le16_to_cpu(event->source) > interface->rx_cq_count)) {
dev_err(eq->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
__func__, interface->index, le16_to_cpu(event->source), eq_index,
le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true);
} else {
cq_ring = interface->rx_cpl_ring[le16_to_cpu(event->source)];
if (likely(cq_ring && cq_ring->handler))
cq_ring->handler(cq_ring);
cq = interface->rx_cq[le16_to_cpu(event->source)];
if (likely(cq && cq->handler))
cq->handler(cq);
}
} else {
dev_err(eq_ring->dev, "%s on port %d: unknown event type %d (index %d, source %d)",
dev_err(eq->dev, "%s on port %d: unknown event type %d (index %d, source %d)",
__func__, interface->index, le16_to_cpu(event->type), eq_index,
le16_to_cpu(event->source));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
@ -268,10 +267,10 @@ void mqnic_process_eq(struct mqnic_eq_ring *eq_ring)
done++;

eq_tail_ptr++;
eq_index = eq_tail_ptr & eq_ring->size_mask;
eq_index = eq_tail_ptr & eq->size_mask;
}

// update eq tail
eq_ring->tail_ptr = eq_tail_ptr;
mqnic_eq_write_tail_ptr(eq_ring);
eq->tail_ptr = eq_tail_ptr;
mqnic_eq_write_tail_ptr(eq);
}

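One subtlety in mqnic_process_eq() above: entry ownership is tracked with a phase bit rather than a head pointer read. Because eq->size is a power of two, bit log2(size) of the software tail pointer flips on every wrap of the ring, and the loop stops as soon as an entry's phase bit matches that wrap parity; the memset(eq->buf, 1, ...) at activation makes every stale entry compare as hardware-owned. A condensed sketch of that test (illustrative only, mirroring the hunk; the 0x80000000 phase mask is taken from the code above):

/* Sketch: an EQ entry is ready for software only while its phase bit differs
 * from the wrap parity of the software tail pointer. */
static bool example_eq_entry_ready(const struct mqnic_eq *eq, u32 tail_ptr)
{
    const struct mqnic_event *event =
        (const struct mqnic_event *)(eq->buf + (tail_ptr & eq->size_mask) * eq->stride);

    return !!(event->phase & cpu_to_le32(0x80000000)) != !!(tail_ptr & eq->size);
}
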
@ -46,11 +46,11 @@

#define MQNIC_MAX_FRAGS 8

#define MQNIC_MAX_EVENT_RINGS 256
#define MQNIC_MAX_TX_RINGS 8192
#define MQNIC_MAX_TX_CPL_RINGS 8192
#define MQNIC_MAX_RX_RINGS 8192
#define MQNIC_MAX_RX_CPL_RINGS 8192
#define MQNIC_MAX_EQ 256
#define MQNIC_MAX_TXQ 8192
#define MQNIC_MAX_TX_CQ MQNIC_MAX_TXQ
#define MQNIC_MAX_RXQ 8192
#define MQNIC_MAX_RX_CQ MQNIC_MAX_RXQ

#define MQNIC_MAX_I2C_ADAPTERS 4

@ -221,11 +221,11 @@
#define MQNIC_RB_RX_QUEUE_MAP_CH_REG_RSS_MASK 0x04
#define MQNIC_RB_RX_QUEUE_MAP_CH_REG_APP_MASK 0x08

#define MQNIC_RB_EVENT_QM_TYPE 0x0000C010
#define MQNIC_RB_EVENT_QM_VER 0x00000200
#define MQNIC_RB_EVENT_QM_REG_OFFSET 0x0C
#define MQNIC_RB_EVENT_QM_REG_COUNT 0x10
#define MQNIC_RB_EVENT_QM_REG_STRIDE 0x14
#define MQNIC_RB_EQM_TYPE 0x0000C010
#define MQNIC_RB_EQM_VER 0x00000200
#define MQNIC_RB_EQM_REG_OFFSET 0x0C
#define MQNIC_RB_EQM_REG_COUNT 0x10
#define MQNIC_RB_EQM_REG_STRIDE 0x14

#define MQNIC_RB_TX_QM_TYPE 0x0000C020
#define MQNIC_RB_TX_QM_VER 0x00000200
@ -315,27 +315,27 @@

#define MQNIC_QUEUE_ACTIVE_MASK 0x80000000

#define MQNIC_CPL_QUEUE_BASE_ADDR_REG 0x00
#define MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG 0x08
#define MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG 0x0C
#define MQNIC_CPL_QUEUE_HEAD_PTR_REG 0x10
#define MQNIC_CPL_QUEUE_TAIL_PTR_REG 0x18
#define MQNIC_CQ_BASE_ADDR_REG 0x00
#define MQNIC_CQ_ACTIVE_LOG_SIZE_REG 0x08
#define MQNIC_CQ_INTERRUPT_INDEX_REG 0x0C
#define MQNIC_CQ_HEAD_PTR_REG 0x10
#define MQNIC_CQ_TAIL_PTR_REG 0x18

#define MQNIC_CPL_QUEUE_ACTIVE_MASK 0x80000000
#define MQNIC_CQ_ACTIVE_MASK 0x80000000

#define MQNIC_CPL_QUEUE_ARM_MASK 0x80000000
#define MQNIC_CPL_QUEUE_CONT_MASK 0x40000000
#define MQNIC_CQ_ARM_MASK 0x80000000
#define MQNIC_CQ_CONT_MASK 0x40000000

#define MQNIC_EVENT_QUEUE_BASE_ADDR_REG 0x00
#define MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG 0x08
#define MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG 0x0C
#define MQNIC_EVENT_QUEUE_HEAD_PTR_REG 0x10
#define MQNIC_EVENT_QUEUE_TAIL_PTR_REG 0x18
#define MQNIC_EQ_BASE_ADDR_REG 0x00
#define MQNIC_EQ_ACTIVE_LOG_SIZE_REG 0x08
#define MQNIC_EQ_INTERRUPT_INDEX_REG 0x0C
#define MQNIC_EQ_HEAD_PTR_REG 0x10
#define MQNIC_EQ_TAIL_PTR_REG 0x18

#define MQNIC_EVENT_QUEUE_ACTIVE_MASK 0x80000000
#define MQNIC_EQ_ACTIVE_MASK 0x80000000

#define MQNIC_EVENT_QUEUE_ARM_MASK 0x80000000
#define MQNIC_EVENT_QUEUE_CONT_MASK 0x40000000
#define MQNIC_EQ_ARM_MASK 0x80000000
#define MQNIC_EQ_CONT_MASK 0x40000000

#define MQNIC_EVENT_TYPE_TX_CPL 0x0000
#define MQNIC_EVENT_TYPE_RX_CPL 0x0001

@ -94,95 +94,95 @@ int mqnic_create_interface(struct mqnic_dev *mdev, struct mqnic_if **interface_p
dev_info(dev, "Max TX MTU: %d", interface->max_tx_mtu);
dev_info(dev, "Max RX MTU: %d", interface->max_rx_mtu);

interface->event_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_EVENT_QM_TYPE, MQNIC_RB_EVENT_QM_VER, 0);
interface->eq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_EQM_TYPE, MQNIC_RB_EQM_VER, 0);

if (!interface->event_queue_rb) {
if (!interface->eq_rb) {
ret = -EIO;
dev_err(dev, "Event queue block not found");
dev_err(dev, "EQ block not found");
goto fail;
}

interface->event_queue_offset = ioread32(interface->event_queue_rb->regs + MQNIC_RB_EVENT_QM_REG_OFFSET);
interface->event_queue_count = ioread32(interface->event_queue_rb->regs + MQNIC_RB_EVENT_QM_REG_COUNT);
interface->event_queue_stride = ioread32(interface->event_queue_rb->regs + MQNIC_RB_EVENT_QM_REG_STRIDE);
interface->eq_offset = ioread32(interface->eq_rb->regs + MQNIC_RB_EQM_REG_OFFSET);
interface->eq_count = ioread32(interface->eq_rb->regs + MQNIC_RB_EQM_REG_COUNT);
interface->eq_stride = ioread32(interface->eq_rb->regs + MQNIC_RB_EQM_REG_STRIDE);

dev_info(dev, "Event queue offset: 0x%08x", interface->event_queue_offset);
dev_info(dev, "Event queue count: %d", interface->event_queue_count);
dev_info(dev, "Event queue stride: 0x%08x", interface->event_queue_stride);
dev_info(dev, "EQ offset: 0x%08x", interface->eq_offset);
dev_info(dev, "EQ count: %d", interface->eq_count);
dev_info(dev, "EQ stride: 0x%08x", interface->eq_stride);

interface->event_queue_count = min_t(u32, interface->event_queue_count, MQNIC_MAX_EVENT_RINGS);
interface->eq_count = min_t(u32, interface->eq_count, MQNIC_MAX_EQ);

interface->tx_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_QM_TYPE, MQNIC_RB_TX_QM_VER, 0);
interface->txq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_QM_TYPE, MQNIC_RB_TX_QM_VER, 0);

if (!interface->tx_queue_rb) {
if (!interface->txq_rb) {
ret = -EIO;
dev_err(dev, "TX queue block not found");
dev_err(dev, "TXQ block not found");
goto fail;
}

interface->tx_queue_offset = ioread32(interface->tx_queue_rb->regs + MQNIC_RB_TX_QM_REG_OFFSET);
interface->tx_queue_count = ioread32(interface->tx_queue_rb->regs + MQNIC_RB_TX_QM_REG_COUNT);
interface->tx_queue_stride = ioread32(interface->tx_queue_rb->regs + MQNIC_RB_TX_QM_REG_STRIDE);
interface->txq_offset = ioread32(interface->txq_rb->regs + MQNIC_RB_TX_QM_REG_OFFSET);
interface->txq_count = ioread32(interface->txq_rb->regs + MQNIC_RB_TX_QM_REG_COUNT);
interface->txq_stride = ioread32(interface->txq_rb->regs + MQNIC_RB_TX_QM_REG_STRIDE);

dev_info(dev, "TX queue offset: 0x%08x", interface->tx_queue_offset);
dev_info(dev, "TX queue count: %d", interface->tx_queue_count);
dev_info(dev, "TX queue stride: 0x%08x", interface->tx_queue_stride);
dev_info(dev, "TXQ offset: 0x%08x", interface->txq_offset);
dev_info(dev, "TXQ count: %d", interface->txq_count);
dev_info(dev, "TXQ stride: 0x%08x", interface->txq_stride);

interface->tx_queue_count = min_t(u32, interface->tx_queue_count, MQNIC_MAX_TX_RINGS);
interface->txq_count = min_t(u32, interface->txq_count, MQNIC_MAX_TXQ);

interface->tx_cpl_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_CQM_TYPE, MQNIC_RB_TX_CQM_VER, 0);
interface->tx_cq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_TX_CQM_TYPE, MQNIC_RB_TX_CQM_VER, 0);

if (!interface->tx_cpl_queue_rb) {
if (!interface->tx_cq_rb) {
ret = -EIO;
dev_err(dev, "TX completion queue block not found");
dev_err(dev, "TX CQ block not found");
goto fail;
}

interface->tx_cpl_queue_offset = ioread32(interface->tx_cpl_queue_rb->regs + MQNIC_RB_TX_CQM_REG_OFFSET);
interface->tx_cpl_queue_count = ioread32(interface->tx_cpl_queue_rb->regs + MQNIC_RB_TX_CQM_REG_COUNT);
interface->tx_cpl_queue_stride = ioread32(interface->tx_cpl_queue_rb->regs + MQNIC_RB_TX_CQM_REG_STRIDE);
interface->tx_cq_offset = ioread32(interface->tx_cq_rb->regs + MQNIC_RB_TX_CQM_REG_OFFSET);
interface->tx_cq_count = ioread32(interface->tx_cq_rb->regs + MQNIC_RB_TX_CQM_REG_COUNT);
interface->tx_cq_stride = ioread32(interface->tx_cq_rb->regs + MQNIC_RB_TX_CQM_REG_STRIDE);

dev_info(dev, "TX completion queue offset: 0x%08x", interface->tx_cpl_queue_offset);
dev_info(dev, "TX completion queue count: %d", interface->tx_cpl_queue_count);
dev_info(dev, "TX completion queue stride: 0x%08x", interface->tx_cpl_queue_stride);
dev_info(dev, "TX CQ offset: 0x%08x", interface->tx_cq_offset);
dev_info(dev, "TX CQ count: %d", interface->tx_cq_count);
dev_info(dev, "TX CQ stride: 0x%08x", interface->tx_cq_stride);

interface->tx_cpl_queue_count = min_t(u32, interface->tx_cpl_queue_count, MQNIC_MAX_TX_CPL_RINGS);
interface->tx_cq_count = min_t(u32, interface->tx_cq_count, MQNIC_MAX_TX_CQ);

interface->rx_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_QM_TYPE, MQNIC_RB_RX_QM_VER, 0);
interface->rxq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_QM_TYPE, MQNIC_RB_RX_QM_VER, 0);

if (!interface->rx_queue_rb) {
if (!interface->rxq_rb) {
ret = -EIO;
dev_err(dev, "RX queue block not found");
dev_err(dev, "RXQ block not found");
goto fail;
}

interface->rx_queue_offset = ioread32(interface->rx_queue_rb->regs + MQNIC_RB_RX_QM_REG_OFFSET);
interface->rx_queue_count = ioread32(interface->rx_queue_rb->regs + MQNIC_RB_RX_QM_REG_COUNT);
interface->rx_queue_stride = ioread32(interface->rx_queue_rb->regs + MQNIC_RB_RX_QM_REG_STRIDE);
interface->rxq_offset = ioread32(interface->rxq_rb->regs + MQNIC_RB_RX_QM_REG_OFFSET);
interface->rxq_count = ioread32(interface->rxq_rb->regs + MQNIC_RB_RX_QM_REG_COUNT);
interface->rxq_stride = ioread32(interface->rxq_rb->regs + MQNIC_RB_RX_QM_REG_STRIDE);

dev_info(dev, "RX queue offset: 0x%08x", interface->rx_queue_offset);
dev_info(dev, "RX queue count: %d", interface->rx_queue_count);
dev_info(dev, "RX queue stride: 0x%08x", interface->rx_queue_stride);
dev_info(dev, "RXQ offset: 0x%08x", interface->rxq_offset);
dev_info(dev, "RXQ count: %d", interface->rxq_count);
dev_info(dev, "RXQ stride: 0x%08x", interface->rxq_stride);

interface->rx_queue_count = min_t(u32, interface->rx_queue_count, MQNIC_MAX_RX_RINGS);
interface->rxq_count = min_t(u32, interface->rxq_count, MQNIC_MAX_RXQ);

interface->rx_cpl_queue_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_CQM_TYPE, MQNIC_RB_RX_CQM_VER, 0);
interface->rx_cq_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_CQM_TYPE, MQNIC_RB_RX_CQM_VER, 0);

if (!interface->rx_cpl_queue_rb) {
if (!interface->rx_cq_rb) {
ret = -EIO;
dev_err(dev, "RX completion queue block not found");
dev_err(dev, "RX CQ block not found");
goto fail;
}

interface->rx_cpl_queue_offset = ioread32(interface->rx_cpl_queue_rb->regs + MQNIC_RB_RX_CQM_REG_OFFSET);
interface->rx_cpl_queue_count = ioread32(interface->rx_cpl_queue_rb->regs + MQNIC_RB_RX_CQM_REG_COUNT);
interface->rx_cpl_queue_stride = ioread32(interface->rx_cpl_queue_rb->regs + MQNIC_RB_RX_CQM_REG_STRIDE);
interface->rx_cq_offset = ioread32(interface->rx_cq_rb->regs + MQNIC_RB_RX_CQM_REG_OFFSET);
interface->rx_cq_count = ioread32(interface->rx_cq_rb->regs + MQNIC_RB_RX_CQM_REG_COUNT);
interface->rx_cq_stride = ioread32(interface->rx_cq_rb->regs + MQNIC_RB_RX_CQM_REG_STRIDE);

dev_info(dev, "RX completion queue offset: 0x%08x", interface->rx_cpl_queue_offset);
dev_info(dev, "RX completion queue count: %d", interface->rx_cpl_queue_count);
dev_info(dev, "RX completion queue stride: 0x%08x", interface->rx_cpl_queue_stride);
dev_info(dev, "RX CQ offset: 0x%08x", interface->rx_cq_offset);
dev_info(dev, "RX CQ count: %d", interface->rx_cq_count);
dev_info(dev, "RX CQ stride: 0x%08x", interface->rx_cq_stride);

interface->rx_cpl_queue_count = min_t(u32, interface->rx_cpl_queue_count, MQNIC_MAX_RX_CPL_RINGS);
interface->rx_cq_count = min_t(u32, interface->rx_cq_count, MQNIC_MAX_RX_CQ);

interface->rx_queue_map_rb = mqnic_find_reg_block(interface->rb_list, MQNIC_RB_RX_QUEUE_MAP_TYPE, MQNIC_RB_RX_QUEUE_MAP_VER, 0);
@ -207,9 +207,9 @@ int mqnic_create_interface(struct mqnic_dev *mdev, struct mqnic_if **interface_p
}

// determine desc block size
iowrite32(0xf << 8, hw_addr + interface->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
interface->max_desc_block_size = 1 << ((ioread32(hw_addr + interface->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG) >> 8) & 0xf);
iowrite32(0, hw_addr + interface->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
iowrite32(0xf << 8, hw_addr + interface->txq_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
interface->max_desc_block_size = 1 << ((ioread32(hw_addr + interface->txq_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG) >> 8) & 0xf);
iowrite32(0, hw_addr + interface->txq_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

dev_info(dev, "Max desc block size: %d", interface->max_desc_block_size);
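The probe above works because the hardware clamps the 4-bit log-desc-block-size field (bits 15:8 of the ACTIVE_LOG_SIZE register) to what it actually supports: write the maximum value, read back what stuck, then clear the register again. A compact restatement of the same three accesses, with q_base standing in for hw_addr + interface->txq_offset:

u32 val;
iowrite32(0xf << 8, q_base + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);  /* request log2 block size 15 */
val = ioread32(q_base + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);       /* hardware has clamped the field */
max_desc_block_size = 1 << ((val >> 8) & 0xf);                  /* e.g. field 2 -> 4 descriptors */
iowrite32(0, q_base + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);         /* leave the queue inactive */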
@ -218,45 +218,45 @@ int mqnic_create_interface(struct mqnic_dev *mdev, struct mqnic_if **interface_p
desc_block_size = min_t(u32, interface->max_desc_block_size, 4);

// create rings
for (k = 0; k < interface->event_queue_count; k++) {
ret = mqnic_create_eq_ring(interface, &interface->event_ring[k], k,
hw_addr + interface->event_queue_offset + k * interface->event_queue_stride);
for (k = 0; k < interface->eq_count; k++) {
ret = mqnic_create_eq(interface, &interface->eq[k], k,
hw_addr + interface->eq_offset + k * interface->eq_stride);
if (ret)
goto fail;

ret = mqnic_alloc_eq_ring(interface->event_ring[k], mqnic_num_ev_queue_entries,
ret = mqnic_alloc_eq(interface->eq[k], mqnic_num_eq_entries,
MQNIC_EVENT_SIZE);
if (ret)
goto fail;

mqnic_activate_eq_ring(interface->event_ring[k], mdev->irq[k % mdev->irq_count]);
mqnic_arm_eq(interface->event_ring[k]);
mqnic_activate_eq(interface->eq[k], mdev->irq[k % mdev->irq_count]);
mqnic_arm_eq(interface->eq[k]);
}

for (k = 0; k < interface->tx_queue_count; k++) {
ret = mqnic_create_tx_ring(interface, &interface->tx_ring[k], k,
hw_addr + interface->tx_queue_offset + k * interface->tx_queue_stride);
for (k = 0; k < interface->txq_count; k++) {
ret = mqnic_create_tx_ring(interface, &interface->txq[k], k,
hw_addr + interface->txq_offset + k * interface->txq_stride);
if (ret)
goto fail;
}

for (k = 0; k < interface->tx_cpl_queue_count; k++) {
ret = mqnic_create_cq_ring(interface, &interface->tx_cpl_ring[k], k,
hw_addr + interface->tx_cpl_queue_offset + k * interface->tx_cpl_queue_stride);
for (k = 0; k < interface->tx_cq_count; k++) {
ret = mqnic_create_cq(interface, &interface->tx_cq[k], k,
hw_addr + interface->tx_cq_offset + k * interface->tx_cq_stride);
if (ret)
goto fail;
}

for (k = 0; k < interface->rx_queue_count; k++) {
ret = mqnic_create_rx_ring(interface, &interface->rx_ring[k], k,
hw_addr + interface->rx_queue_offset + k * interface->rx_queue_stride);
for (k = 0; k < interface->rxq_count; k++) {
ret = mqnic_create_rx_ring(interface, &interface->rxq[k], k,
hw_addr + interface->rxq_offset + k * interface->rxq_stride);
if (ret)
goto fail;
}

for (k = 0; k < interface->rx_cpl_queue_count; k++) {
ret = mqnic_create_cq_ring(interface, &interface->rx_cpl_ring[k], k,
hw_addr + interface->rx_cpl_queue_offset + k * interface->rx_cpl_queue_stride);
for (k = 0; k < interface->rx_cq_count; k++) {
ret = mqnic_create_cq(interface, &interface->rx_cq[k], k,
hw_addr + interface->rx_cq_offset + k * interface->rx_cq_stride);
if (ret)
goto fail;
}
@ -322,25 +322,25 @@ void mqnic_destroy_interface(struct mqnic_if **interface_ptr)
mqnic_destroy_netdev(&interface->ndev[k]);

// free rings
for (k = 0; k < ARRAY_SIZE(interface->event_ring); k++)
if (interface->event_ring[k])
mqnic_destroy_eq_ring(&interface->event_ring[k]);
for (k = 0; k < ARRAY_SIZE(interface->eq); k++)
if (interface->eq[k])
mqnic_destroy_eq(&interface->eq[k]);

for (k = 0; k < ARRAY_SIZE(interface->tx_ring); k++)
if (interface->tx_ring[k])
mqnic_destroy_tx_ring(&interface->tx_ring[k]);
for (k = 0; k < ARRAY_SIZE(interface->txq); k++)
if (interface->txq[k])
mqnic_destroy_tx_ring(&interface->txq[k]);

for (k = 0; k < ARRAY_SIZE(interface->tx_cpl_ring); k++)
if (interface->tx_cpl_ring[k])
mqnic_destroy_cq_ring(&interface->tx_cpl_ring[k]);
for (k = 0; k < ARRAY_SIZE(interface->tx_cq); k++)
if (interface->tx_cq[k])
mqnic_destroy_cq(&interface->tx_cq[k]);

for (k = 0; k < ARRAY_SIZE(interface->rx_ring); k++)
if (interface->rx_ring[k])
mqnic_destroy_rx_ring(&interface->rx_ring[k]);
for (k = 0; k < ARRAY_SIZE(interface->rxq); k++)
if (interface->rxq[k])
mqnic_destroy_rx_ring(&interface->rxq[k]);

for (k = 0; k < ARRAY_SIZE(interface->rx_cpl_ring); k++)
if (interface->rx_cpl_ring[k])
mqnic_destroy_cq_ring(&interface->rx_cpl_ring[k]);
for (k = 0; k < ARRAY_SIZE(interface->rx_cq); k++)
if (interface->rx_cq[k])
mqnic_destroy_cq(&interface->rx_cq[k]);

// free schedulers
for (k = 0; k < ARRAY_SIZE(interface->sched_block); k++)
@ -48,16 +48,16 @@ MODULE_AUTHOR("Alex Forencich");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

unsigned int mqnic_num_ev_queue_entries = 1024;
unsigned int mqnic_num_tx_queue_entries = 1024;
unsigned int mqnic_num_rx_queue_entries = 1024;
unsigned int mqnic_num_eq_entries = 1024;
unsigned int mqnic_num_txq_entries = 1024;
unsigned int mqnic_num_rxq_entries = 1024;

module_param_named(num_ev_queue_entries, mqnic_num_ev_queue_entries, uint, 0444);
MODULE_PARM_DESC(num_ev_queue_entries, "number of entries to allocate per event queue (default: 1024)");
module_param_named(num_tx_queue_entries, mqnic_num_tx_queue_entries, uint, 0444);
MODULE_PARM_DESC(num_tx_queue_entries, "number of entries to allocate per transmit queue (default: 1024)");
module_param_named(num_rx_queue_entries, mqnic_num_rx_queue_entries, uint, 0444);
MODULE_PARM_DESC(num_rx_queue_entries, "number of entries to allocate per receive queue (default: 1024)");
module_param_named(num_eq_entries, mqnic_num_eq_entries, uint, 0444);
MODULE_PARM_DESC(num_eq_entries, "number of entries to allocate per event queue (default: 1024)");
module_param_named(num_txq_entries, mqnic_num_txq_entries, uint, 0444);
MODULE_PARM_DESC(num_txq_entries, "number of entries to allocate per transmit queue (default: 1024)");
module_param_named(num_rxq_entries, mqnic_num_rxq_entries, uint, 0444);
MODULE_PARM_DESC(num_rxq_entries, "number of entries to allocate per receive queue (default: 1024)");

unsigned int mqnic_link_status_poll = MQNIC_LINK_STATUS_POLL_MS;
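With the renamed parameters, ring sizes are still chosen at module load time; for example (hypothetical sizes):

modprobe mqnic num_eq_entries=2048 num_txq_entries=4096 num_rxq_entries=4096

The 0444 permissions make the current values readable under /sys/module/mqnic/parameters/ but not writable at runtime.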
@ -47,51 +47,45 @@ static int mqnic_start_port(struct net_device *ndev)
priv->interface->index, priv->index);

// set up RX queues
for (k = 0; k < min(priv->rx_queue_count, priv->rx_cpl_queue_count); k++) {
for (k = 0; k < min(priv->rxq_count, priv->rx_cq_count); k++) {
// set up CQ
mqnic_activate_cq_ring(priv->rx_cpl_ring[k],
priv->event_ring[k % priv->event_queue_count]);
mqnic_activate_cq(priv->rx_cq[k], priv->eq[k % priv->eq_count]);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
netif_napi_add(ndev, &priv->rx_cpl_ring[k]->napi,
mqnic_poll_rx_cq);
netif_napi_add(ndev, &priv->rx_cq[k]->napi, mqnic_poll_rx_cq);
#else
netif_napi_add(ndev, &priv->rx_cpl_ring[k]->napi,
mqnic_poll_rx_cq, NAPI_POLL_WEIGHT);
netif_napi_add(ndev, &priv->rx_cq[k]->napi, mqnic_poll_rx_cq, NAPI_POLL_WEIGHT);
#endif
napi_enable(&priv->rx_cpl_ring[k]->napi);
napi_enable(&priv->rx_cq[k]->napi);

mqnic_arm_cq(priv->rx_cpl_ring[k]);
mqnic_arm_cq(priv->rx_cq[k]);

// set up queue
priv->rx_ring[k]->mtu = ndev->mtu;
priv->rxq[k]->mtu = ndev->mtu;
if (ndev->mtu + ETH_HLEN <= PAGE_SIZE)
priv->rx_ring[k]->page_order = 0;
priv->rxq[k]->page_order = 0;
else
priv->rx_ring[k]->page_order = ilog2((ndev->mtu + ETH_HLEN + PAGE_SIZE - 1) / PAGE_SIZE - 1) + 1;
mqnic_activate_rx_ring(priv->rx_ring[k], priv, priv->rx_cpl_ring[k]);
priv->rxq[k]->page_order = ilog2((ndev->mtu + ETH_HLEN + PAGE_SIZE - 1) / PAGE_SIZE - 1) + 1;
mqnic_activate_rx_ring(priv->rxq[k], priv, priv->rx_cq[k]);
}
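The page_order expression above picks the smallest power-of-two page count that holds one maximum-size frame. An equivalent sketch (the standalone helper is illustrative), assuming PAGE_SIZE 4096 and ETH_HLEN 14:

static unsigned int rx_page_order(unsigned int mtu)
{
	unsigned int pages = (mtu + ETH_HLEN + PAGE_SIZE - 1) / PAGE_SIZE; /* pages needed, rounded up */

	return pages <= 1 ? 0 : ilog2(pages - 1) + 1; /* round the page count up to a power of two */
}

For mtu = 9000: pages = 3, so the order is ilog2(2) + 1 = 2, i.e. a 4-page (16 KiB) buffer per receive slot.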
// set up TX queues
for (k = 0; k < min(priv->tx_queue_count, priv->tx_cpl_queue_count); k++) {
for (k = 0; k < min(priv->txq_count, priv->tx_cq_count); k++) {
// set up CQ
mqnic_activate_cq_ring(priv->tx_cpl_ring[k],
priv->event_ring[k % priv->event_queue_count]);
mqnic_activate_cq(priv->tx_cq[k], priv->eq[k % priv->eq_count]);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
netif_napi_add_tx(ndev, &priv->tx_cpl_ring[k]->napi,
mqnic_poll_tx_cq);
netif_napi_add_tx(ndev, &priv->tx_cq[k]->napi, mqnic_poll_tx_cq);
#else
netif_tx_napi_add(ndev, &priv->tx_cpl_ring[k]->napi,
mqnic_poll_tx_cq, NAPI_POLL_WEIGHT);
netif_tx_napi_add(ndev, &priv->tx_cq[k]->napi, mqnic_poll_tx_cq, NAPI_POLL_WEIGHT);
#endif
napi_enable(&priv->tx_cpl_ring[k]->napi);
napi_enable(&priv->tx_cq[k]->napi);

mqnic_arm_cq(priv->tx_cpl_ring[k]);
mqnic_arm_cq(priv->tx_cq[k]);

// set up queue
priv->tx_ring[k]->tx_queue = netdev_get_tx_queue(ndev, k);
mqnic_activate_tx_ring(priv->tx_ring[k], priv, priv->tx_cpl_ring[k]);
priv->txq[k]->tx_queue = netdev_get_tx_queue(ndev, k);
mqnic_activate_tx_ring(priv->txq[k], priv, priv->tx_cq[k]);
}

// set MTU
@ -103,7 +97,7 @@ static int mqnic_start_port(struct net_device *ndev)
mqnic_interface_set_rx_queue_map_app_mask(priv->interface, 0, 0);

for (k = 0; k < priv->interface->rx_queue_map_indir_table_size; k++) {
mqnic_interface_set_rx_queue_map_indir_table(priv->interface, 0, k, priv->rx_ring[k % priv->rx_queue_count]->index);
mqnic_interface_set_rx_queue_map_indir_table(priv->interface, 0, k, priv->rxq[k % priv->rxq_count]->index);
}
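The loop above fills the RSS indirection table round-robin, so hash buckets spread evenly over the active receive queues; with a 256-entry table and rxq_count = 8, buckets 0, 8, 16, ... land on queue 0, buckets 1, 9, 17, ... on queue 1, and so on. In sketch form (entry[] is illustrative):

for (k = 0; k < indir_table_size; k++)
	entry[k] = k % rxq_count; /* hash bucket k -> RX queue k mod rxq_count */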
// enable first scheduler
@ -155,36 +149,36 @@ static int mqnic_stop_port(struct net_device *ndev)
mqnic_deactivate_sched_block(priv->sched_block[k]);

// deactivate TX queues
for (k = 0; k < min(priv->tx_queue_count, priv->tx_cpl_queue_count); k++) {
napi_disable(&priv->tx_cpl_ring[k]->napi);
for (k = 0; k < min(priv->txq_count, priv->tx_cq_count); k++) {
napi_disable(&priv->tx_cq[k]->napi);

mqnic_deactivate_tx_ring(priv->tx_ring[k]);
mqnic_deactivate_tx_ring(priv->txq[k]);

mqnic_deactivate_cq_ring(priv->tx_cpl_ring[k]);
mqnic_deactivate_cq(priv->tx_cq[k]);

netif_napi_del(&priv->tx_cpl_ring[k]->napi);
netif_napi_del(&priv->tx_cq[k]->napi);
}

// deactivate RX queues
for (k = 0; k < min(priv->rx_queue_count, priv->rx_cpl_queue_count); k++) {
napi_disable(&priv->rx_cpl_ring[k]->napi);
for (k = 0; k < min(priv->rxq_count, priv->rx_cq_count); k++) {
napi_disable(&priv->rx_cq[k]->napi);

mqnic_deactivate_rx_ring(priv->rx_ring[k]);
mqnic_deactivate_rx_ring(priv->rxq[k]);

mqnic_deactivate_cq_ring(priv->rx_cpl_ring[k]);
mqnic_deactivate_cq(priv->rx_cq[k]);

netif_napi_del(&priv->rx_cpl_ring[k]->napi);
netif_napi_del(&priv->rx_cq[k]->napi);
}

msleep(20);

// free descriptors in TX queues
for (k = 0; k < priv->tx_queue_count; k++)
mqnic_free_tx_buf(priv->tx_ring[k]);
for (k = 0; k < priv->txq_count; k++)
mqnic_free_tx_buf(priv->txq[k]);

// free descriptors in RX queues
for (k = 0; k < priv->rx_queue_count; k++)
mqnic_free_rx_buf(priv->rx_ring[k]);
for (k = 0; k < priv->rxq_count; k++)
mqnic_free_rx_buf(priv->rxq[k]);

netif_carrier_off(ndev);
return 0;
@ -237,8 +231,8 @@ void mqnic_update_stats(struct net_device *ndev)

packets = 0;
bytes = 0;
for (k = 0; k < priv->rx_queue_count; k++) {
const struct mqnic_ring *ring = priv->rx_ring[k];
for (k = 0; k < priv->rxq_count; k++) {
const struct mqnic_ring *ring = priv->rxq[k];

packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
@ -248,8 +242,8 @@ void mqnic_update_stats(struct net_device *ndev)

packets = 0;
bytes = 0;
for (k = 0; k < priv->tx_queue_count; k++) {
const struct mqnic_ring *ring = priv->tx_ring[k];
for (k = 0; k < priv->txq_count; k++) {
const struct mqnic_ring *ring = priv->txq[k];

packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
@ -427,7 +421,7 @@ int mqnic_create_netdev(struct mqnic_if *interface, struct net_device **ndev_ptr
int k;
u32 desc_block_size;

ndev = alloc_etherdev_mqs(sizeof(*priv), MQNIC_MAX_TX_RINGS, MQNIC_MAX_RX_RINGS);
ndev = alloc_etherdev_mqs(sizeof(*priv), MQNIC_MAX_TXQ, MQNIC_MAX_RXQ);
if (!ndev) {
dev_err(dev, "Failed to allocate memory");
return -ENOMEM;
@ -454,32 +448,32 @@ int mqnic_create_netdev(struct mqnic_if *interface, struct net_device **ndev_ptr
// associate interface resources
priv->if_features = interface->if_features;

priv->event_queue_count = interface->event_queue_count;
for (k = 0; k < interface->event_queue_count; k++)
priv->event_ring[k] = interface->event_ring[k];
priv->eq_count = interface->eq_count;
for (k = 0; k < interface->eq_count; k++)
priv->eq[k] = interface->eq[k];

priv->tx_queue_count = interface->tx_queue_count;
for (k = 0; k < interface->tx_queue_count; k++)
priv->tx_ring[k] = interface->tx_ring[k];
priv->txq_count = interface->txq_count;
for (k = 0; k < interface->txq_count; k++)
priv->txq[k] = interface->txq[k];

priv->tx_cpl_queue_count = interface->tx_cpl_queue_count;
for (k = 0; k < interface->tx_cpl_queue_count; k++)
priv->tx_cpl_ring[k] = interface->tx_cpl_ring[k];
priv->tx_cq_count = interface->tx_cq_count;
for (k = 0; k < interface->tx_cq_count; k++)
priv->tx_cq[k] = interface->tx_cq[k];

priv->rx_queue_count = interface->rx_queue_count;
for (k = 0; k < interface->rx_queue_count; k++)
priv->rx_ring[k] = interface->rx_ring[k];
priv->rxq_count = interface->rxq_count;
for (k = 0; k < interface->rxq_count; k++)
priv->rxq[k] = interface->rxq[k];

priv->rx_cpl_queue_count = interface->rx_cpl_queue_count;
for (k = 0; k < interface->rx_cpl_queue_count; k++)
priv->rx_cpl_ring[k] = interface->rx_cpl_ring[k];
priv->rx_cq_count = interface->rx_cq_count;
for (k = 0; k < interface->rx_cq_count; k++)
priv->rx_cq[k] = interface->rx_cq[k];

priv->sched_block_count = interface->sched_block_count;
for (k = 0; k < interface->sched_block_count; k++)
priv->sched_block[k] = interface->sched_block[k];

netif_set_real_num_tx_queues(ndev, priv->tx_queue_count);
netif_set_real_num_rx_queues(ndev, priv->rx_queue_count);
netif_set_real_num_tx_queues(ndev, priv->txq_count);
netif_set_real_num_rx_queues(ndev, priv->rxq_count);

// set MAC
ndev->addr_len = ETH_ALEN;
@ -507,29 +501,29 @@ int mqnic_create_netdev(struct mqnic_if *interface, struct net_device **ndev_ptr
desc_block_size = min_t(u32, interface->max_desc_block_size, 4);

// allocate ring buffers
for (k = 0; k < priv->tx_queue_count; k++) {
ret = mqnic_alloc_tx_ring(priv->tx_ring[k], mqnic_num_tx_queue_entries,
for (k = 0; k < priv->txq_count; k++) {
ret = mqnic_alloc_tx_ring(priv->txq[k], mqnic_num_txq_entries,
MQNIC_DESC_SIZE * desc_block_size);
if (ret)
goto fail;
}

for (k = 0; k < priv->tx_cpl_queue_count; k++) {
ret = mqnic_alloc_cq_ring(priv->tx_cpl_ring[k], mqnic_num_tx_queue_entries,
for (k = 0; k < priv->tx_cq_count; k++) {
ret = mqnic_alloc_cq(priv->tx_cq[k], mqnic_num_txq_entries,
MQNIC_CPL_SIZE);
if (ret)
goto fail;
}

for (k = 0; k < priv->rx_queue_count; k++) {
ret = mqnic_alloc_rx_ring(priv->rx_ring[k], mqnic_num_rx_queue_entries,
for (k = 0; k < priv->rxq_count; k++) {
ret = mqnic_alloc_rx_ring(priv->rxq[k], mqnic_num_rxq_entries,
MQNIC_DESC_SIZE);
if (ret)
goto fail;
}

for (k = 0; k < priv->rx_cpl_queue_count; k++) {
ret = mqnic_alloc_cq_ring(priv->rx_cpl_ring[k], mqnic_num_rx_queue_entries,
for (k = 0; k < priv->rx_cq_count; k++) {
ret = mqnic_alloc_cq(priv->rx_cq[k], mqnic_num_rxq_entries,
MQNIC_CPL_SIZE);

if (ret)
@ -588,21 +582,21 @@ void mqnic_destroy_netdev(struct net_device **ndev_ptr)
unregister_netdev(ndev);

// free rings
for (k = 0; k < ARRAY_SIZE(priv->tx_ring); k++)
if (priv->tx_ring[k])
mqnic_free_tx_ring(priv->tx_ring[k]);
for (k = 0; k < ARRAY_SIZE(priv->txq); k++)
if (priv->txq[k])
mqnic_free_tx_ring(priv->txq[k]);

for (k = 0; k < ARRAY_SIZE(priv->tx_cpl_ring); k++)
if (priv->tx_cpl_ring[k])
mqnic_free_cq_ring(priv->tx_cpl_ring[k]);
for (k = 0; k < ARRAY_SIZE(priv->tx_cq); k++)
if (priv->tx_cq[k])
mqnic_free_cq(priv->tx_cq[k]);

for (k = 0; k < ARRAY_SIZE(priv->rx_ring); k++)
if (priv->rx_ring[k])
mqnic_free_rx_ring(priv->rx_ring[k]);
for (k = 0; k < ARRAY_SIZE(priv->rxq); k++)
if (priv->rxq[k])
mqnic_free_rx_ring(priv->rxq[k]);

for (k = 0; k < ARRAY_SIZE(priv->rx_cpl_ring); k++)
if (priv->rx_cpl_ring[k])
mqnic_free_cq_ring(priv->rx_cpl_ring[k]);
for (k = 0; k < ARRAY_SIZE(priv->rx_cq); k++)
if (priv->rx_cq[k])
mqnic_free_cq(priv->rx_cq[k]);

*ndev_ptr = NULL;
free_netdev(ndev);
@ -145,17 +145,17 @@ void mqnic_free_rx_ring(struct mqnic_ring *ring)
}

int mqnic_activate_rx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv,
struct mqnic_cq_ring *cq_ring)
struct mqnic_cq *cq)
{
mqnic_deactivate_rx_ring(ring);

if (!ring->buf || !priv || !cq_ring || cq_ring->handler || cq_ring->src_ring)
if (!ring->buf || !priv || !cq || cq->handler || cq->src_ring)
return -EINVAL;

ring->priv = priv;
ring->cq_ring = cq_ring;
cq_ring->src_ring = ring;
cq_ring->handler = mqnic_rx_irq;
ring->cq = cq;
cq->src_ring = ring;
cq->handler = mqnic_rx_irq;

ring->head_ptr = 0;
ring->tail_ptr = 0;
@ -165,8 +165,8 @@ int mqnic_activate_rx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv,
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
// set completion queue index
iowrite32(cq_ring->index, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set CQN
iowrite32(cq->cqn, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
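For reference, the base address programmed above is a 64-bit DMA address split across two 32-bit dwords, low half first, and the queue now refers to its completion queue by number (cqn) rather than by ring index. A minimal sketch of the same sequence, using the field names from the code above:

static void example_write_ring_base(struct mqnic_ring *ring)
{
	/* low dword first, then the high dword at offset +4 */
	iowrite32((u32)ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
	iowrite32((u32)(ring->buf_dma_addr >> 32), ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
}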
@ -187,13 +187,13 @@ void mqnic_deactivate_rx_ring(struct mqnic_ring *ring)
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

if (ring->cq_ring) {
ring->cq_ring->src_ring = NULL;
ring->cq_ring->handler = NULL;
if (ring->cq) {
ring->cq->src_ring = NULL;
ring->cq->handler = NULL;
}

ring->priv = NULL;
ring->cq_ring = NULL;
ring->cq = NULL;

ring->active = 0;
}
@ -309,11 +309,11 @@ void mqnic_refill_rx_buffers(struct mqnic_ring *ring)
mqnic_rx_write_head_ptr(ring);
}

int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
int mqnic_process_rx_cq(struct mqnic_cq *cq, int napi_budget)
{
struct mqnic_if *interface = cq_ring->interface;
struct mqnic_if *interface = cq->interface;
struct device *dev = interface->dev;
struct mqnic_ring *rx_ring = cq_ring->src_ring;
struct mqnic_ring *rx_ring = cq->src_ring;
struct mqnic_priv *priv = rx_ring->priv;
struct mqnic_rx_info *rx_info;
struct mqnic_cpl *cpl;
@ -331,13 +331,13 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
return done;

// process completion queue
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
cq_tail_ptr = cq->tail_ptr;
cq_index = cq_tail_ptr & cq->size_mask;

while (done < budget) {
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
cpl = (struct mqnic_cpl *)(cq->buf + cq_index * cq->stride);

if (!!(cpl->phase & cpu_to_le32(0x80000000)) == !!(cq_tail_ptr & cq->size))
if (!!(cpl->phase & cpu_to_le32(0x80000000)) == !!(cq_tail_ptr & cq_ring->size))
break;

dma_rmb();
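The phase test above is the usual valid-bit scheme for a ring that hardware writes and software only reads: bit log2(size) of the free-running tail counter flips once per lap, and each completion carries the matching lap parity in its top phase bit, so an entry is new exactly while the two bits differ. Isolated as a predicate (a sketch, not driver API):

static bool cq_entry_is_new(const struct mqnic_cpl *cpl, u32 tail_ptr, u32 size)
{
	bool hw_phase = !!(cpl->phase & cpu_to_le32(0x80000000)); /* written by hardware */
	bool sw_phase = !!(tail_ptr & size); /* size is a power of two, so this tests one bit */

	return hw_phase != sw_phase; /* equal phases -> stale entry, stop polling */
}

The dma_rmb() above then orders the phase check before any reads of the completion payload.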
@ -348,16 +348,16 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)

if (unlikely(!page)) {
dev_err(dev, "%s: ring %d null page at index %d",
__func__, cq_ring->index, ring_index);
__func__, rx_ring->index, ring_index);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
cpl, MQNIC_CPL_SIZE, true);
break;
}

skb = napi_get_frags(&cq_ring->napi);
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb)) {
dev_err(dev, "%s: ring %d failed to allocate skb",
__func__, cq_ring->index);
__func__, rx_ring->index);
break;
}
@ -392,7 +392,7 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
skb->truesize += rx_info->len;

// hand off SKB
napi_gro_frags(&cq_ring->napi);
napi_gro_frags(&cq->napi);

rx_ring->packets++;
rx_ring->bytes += le16_to_cpu(cpl->len);
@ -400,12 +400,12 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
done++;

cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
cq_index = cq_tail_ptr & cq->size_mask;
}

// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
cq->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq);

// process ring
ring_tail_ptr = READ_ONCE(rx_ring->tail_ptr);
@ -430,24 +430,24 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
return done;
}

void mqnic_rx_irq(struct mqnic_cq_ring *cq)
void mqnic_rx_irq(struct mqnic_cq *cq)
{
napi_schedule_irqoff(&cq->napi);
}

int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
{
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct mqnic_cq *cq = container_of(napi, struct mqnic_cq, napi);
int done;

done = mqnic_process_rx_cq(cq_ring, budget);
done = mqnic_process_rx_cq(cq, budget);

if (done == budget)
return done;

napi_complete(napi);

mqnic_arm_cq(cq_ring);
mqnic_arm_cq(cq);

return done;
}
@ -55,7 +55,7 @@ int mqnic_create_sched_block(struct mqnic_if *interface, struct mqnic_sched_bloc

block->index = index;

block->tx_queue_count = interface->tx_queue_count;
block->txq_count = interface->txq_count;

block->block_rb = block_rb;
@ -147,17 +147,17 @@ void mqnic_free_tx_ring(struct mqnic_ring *ring)
}

int mqnic_activate_tx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv,
struct mqnic_cq_ring *cq_ring)
struct mqnic_cq *cq)
{
mqnic_deactivate_tx_ring(ring);

if (!ring->buf || !priv || !cq_ring || cq_ring->handler || cq_ring->src_ring)
if (!ring->buf || !priv || !cq || cq->handler || cq->src_ring)
return -EINVAL;

ring->priv = priv;
ring->cq_ring = cq_ring;
cq_ring->src_ring = ring;
cq_ring->handler = mqnic_tx_irq;
ring->cq = cq;
cq->src_ring = ring;
cq->handler = mqnic_tx_irq;

ring->head_ptr = 0;
ring->tail_ptr = 0;
@ -167,8 +167,8 @@ int mqnic_activate_tx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv,
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
// set completion queue index
iowrite32(cq_ring->index, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set CQN
iowrite32(cq->cqn, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
@ -187,13 +187,13 @@ void mqnic_deactivate_tx_ring(struct mqnic_ring *ring)
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

if (ring->cq_ring) {
ring->cq_ring->src_ring = NULL;
ring->cq_ring->handler = NULL;
if (ring->cq) {
ring->cq->src_ring = NULL;
ring->cq->handler = NULL;
}

ring->priv = NULL;
ring->cq_ring = NULL;
ring->cq = NULL;

ring->active = 0;
}
@ -254,10 +254,10 @@ int mqnic_free_tx_buf(struct mqnic_ring *ring)
return cnt;
}

int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
int mqnic_process_tx_cq(struct mqnic_cq *cq, int napi_budget)
{
struct mqnic_if *interface = cq_ring->interface;
struct mqnic_ring *tx_ring = cq_ring->src_ring;
struct mqnic_if *interface = cq->interface;
struct mqnic_ring *tx_ring = cq->src_ring;
struct mqnic_priv *priv = tx_ring->priv;
struct mqnic_tx_info *tx_info;
struct mqnic_cpl *cpl;
@ -278,13 +278,13 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
netdev_txq_bql_complete_prefetchw(tx_ring->tx_queue);

// process completion queue
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
cq_tail_ptr = cq->tail_ptr;
cq_index = cq_tail_ptr & cq->size_mask;

while (done < budget) {
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
cpl = (struct mqnic_cpl *)(cq->buf + cq_index * cq->stride);

if (!!(cpl->phase & cpu_to_le32(0x80000000)) == !!(cq_tail_ptr & cq_ring->size))
if (!!(cpl->phase & cpu_to_le32(0x80000000)) == !!(cq_tail_ptr & cq->size))
break;

dma_rmb();
@ -307,12 +307,12 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
done++;

cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
cq_index = cq_tail_ptr & cq->size_mask;
}

// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
cq->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq);

// process ring
ring_tail_ptr = READ_ONCE(tx_ring->tail_ptr);
@ -341,24 +341,24 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget)
return done;
}

void mqnic_tx_irq(struct mqnic_cq_ring *cq)
void mqnic_tx_irq(struct mqnic_cq *cq)
{
napi_schedule_irqoff(&cq->napi);
}

int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
{
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct mqnic_cq *cq = container_of(napi, struct mqnic_cq, napi);
int done;

done = mqnic_process_tx_cq(cq_ring, budget);
done = mqnic_process_tx_cq(cq, budget);

if (done == budget)
return done;

napi_complete(napi);

mqnic_arm_cq(cq_ring);
mqnic_arm_cq(cq);

return done;
}
@ -449,11 +449,11 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)

ring_index = skb_get_queue_mapping(skb);

if (unlikely(ring_index >= priv->tx_queue_count))
if (unlikely(ring_index >= priv->txq_count))
// queue mapping out of range
goto tx_drop;

ring = priv->tx_ring[ring_index];
ring = priv->txq[ring_index];

tail_ptr = READ_ONCE(ring->tail_ptr);
|
@ -182,25 +182,25 @@ int main(int argc, char *argv[])
|
||||
printf("TX MTU: %d\n", mqnic_reg_read32(dev_interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_TX_MTU));
|
||||
printf("RX MTU: %d\n", mqnic_reg_read32(dev_interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_RX_MTU));
|
||||
|
||||
printf("Event queue offset: 0x%08x\n", dev_interface->event_queue_offset);
|
||||
printf("Event queue count: %d\n", dev_interface->event_queue_count);
|
||||
printf("Event queue stride: 0x%08x\n", dev_interface->event_queue_stride);
|
||||
printf("EQ offset: 0x%08x\n", dev_interface->eq_offset);
|
||||
printf("EQ count: %d\n", dev_interface->eq_count);
|
||||
printf("EQ stride: 0x%08x\n", dev_interface->eq_stride);
|
||||
|
||||
printf("TX queue offset: 0x%08x\n", dev_interface->tx_queue_offset);
|
||||
printf("TX queue count: %d\n", dev_interface->tx_queue_count);
|
||||
printf("TX queue stride: 0x%08x\n", dev_interface->tx_queue_stride);
|
||||
printf("TXQ offset: 0x%08x\n", dev_interface->txq_offset);
|
||||
printf("TXQ count: %d\n", dev_interface->txq_count);
|
||||
printf("TXQ stride: 0x%08x\n", dev_interface->txq_stride);
|
||||
|
||||
printf("TX completion queue offset: 0x%08x\n", dev_interface->tx_cpl_queue_offset);
|
||||
printf("TX completion queue count: %d\n", dev_interface->tx_cpl_queue_count);
|
||||
printf("TX completion queue stride: 0x%08x\n", dev_interface->tx_cpl_queue_stride);
|
||||
printf("TX CQ offset: 0x%08x\n", dev_interface->tx_cq_offset);
|
||||
printf("TX CQ count: %d\n", dev_interface->tx_cq_count);
|
||||
printf("TX CQ stride: 0x%08x\n", dev_interface->tx_cq_stride);
|
||||
|
||||
printf("RX queue offset: 0x%08x\n", dev_interface->rx_queue_offset);
|
||||
printf("RX queue count: %d\n", dev_interface->rx_queue_count);
|
||||
printf("RX queue stride: 0x%08x\n", dev_interface->rx_queue_stride);
|
||||
printf("RXQ offset: 0x%08x\n", dev_interface->rxq_offset);
|
||||
printf("RXQ count: %d\n", dev_interface->rxq_count);
|
||||
printf("RXQ stride: 0x%08x\n", dev_interface->rxq_stride);
|
||||
|
||||
printf("RX completion queue offset: 0x%08x\n", dev_interface->rx_cpl_queue_offset);
|
||||
printf("RX completion queue count: %d\n", dev_interface->rx_cpl_queue_count);
|
||||
printf("RX completion queue stride: 0x%08x\n", dev_interface->rx_cpl_queue_stride);
|
||||
printf("RX CQ offset: 0x%08x\n", dev_interface->rx_cq_offset);
|
||||
printf("RX CQ count: %d\n", dev_interface->rx_cq_count);
|
||||
printf("RX CQ stride: 0x%08x\n", dev_interface->rx_cq_stride);
|
||||
|
||||
if (port < 0 || port >= dev_interface->port_count)
|
||||
{
|
||||
|
@ -249,25 +249,25 @@ int main(int argc, char *argv[])
|
||||
printf("TX MTU: %d\n", mqnic_interface_get_tx_mtu(dev_interface));
|
||||
printf("RX MTU: %d\n", mqnic_interface_get_rx_mtu(dev_interface));
|
||||
|
||||
printf("Event queue offset: 0x%08x\n", dev_interface->event_queue_offset);
|
||||
printf("Event queue count: %d\n", dev_interface->event_queue_count);
|
||||
printf("Event queue stride: 0x%08x\n", dev_interface->event_queue_stride);
|
||||
printf("EQ offset: 0x%08x\n", dev_interface->eq_offset);
|
||||
printf("EQ count: %d\n", dev_interface->eq_count);
|
||||
printf("EQ stride: 0x%08x\n", dev_interface->eq_stride);
|
||||
|
||||
printf("TX queue offset: 0x%08x\n", dev_interface->tx_queue_offset);
|
||||
printf("TX queue count: %d\n", dev_interface->tx_queue_count);
|
||||
printf("TX queue stride: 0x%08x\n", dev_interface->tx_queue_stride);
|
||||
printf("TXQ offset: 0x%08x\n", dev_interface->txq_offset);
|
||||
printf("TXQ count: %d\n", dev_interface->txq_count);
|
||||
printf("TXQ stride: 0x%08x\n", dev_interface->txq_stride);
|
||||
|
||||
printf("TX completion queue offset: 0x%08x\n", dev_interface->tx_cpl_queue_offset);
|
||||
printf("TX completion queue count: %d\n", dev_interface->tx_cpl_queue_count);
|
||||
printf("TX completion queue stride: 0x%08x\n", dev_interface->tx_cpl_queue_stride);
|
||||
printf("TX CQ offset: 0x%08x\n", dev_interface->tx_cq_offset);
|
||||
printf("TX CQ count: %d\n", dev_interface->tx_cq_count);
|
||||
printf("TX CQ stride: 0x%08x\n", dev_interface->tx_cq_stride);
|
||||
|
||||
printf("RX queue offset: 0x%08x\n", dev_interface->rx_queue_offset);
|
||||
printf("RX queue count: %d\n", dev_interface->rx_queue_count);
|
||||
printf("RX queue stride: 0x%08x\n", dev_interface->rx_queue_stride);
|
||||
printf("RXQ offset: 0x%08x\n", dev_interface->rxq_offset);
|
||||
printf("RXQ count: %d\n", dev_interface->rxq_count);
|
||||
printf("RXQ stride: 0x%08x\n", dev_interface->rxq_stride);
|
||||
|
||||
printf("RX completion queue offset: 0x%08x\n", dev_interface->rx_cpl_queue_offset);
|
||||
printf("RX completion queue count: %d\n", dev_interface->rx_cpl_queue_count);
|
||||
printf("RX completion queue stride: 0x%08x\n", dev_interface->rx_cpl_queue_stride);
|
||||
printf("RX CQ offset: 0x%08x\n", dev_interface->rx_cq_offset);
|
||||
printf("RX CQ count: %d\n", dev_interface->rx_cq_count);
|
||||
printf("RX CQ stride: 0x%08x\n", dev_interface->rx_cq_stride);
|
||||
|
||||
for (int p = 0; p < dev_interface->port_count; p++)
|
||||
{
|
||||
@ -379,11 +379,11 @@ int main(int argc, char *argv[])
}
}

printf("TX queue info\n");
printf("TXQ info\n");
printf(" Queue Base Address E B LS CPL Head Tail Len\n");
for (int k = 0; k < dev_interface->tx_queue_count; k++)
for (int k = 0; k < dev_interface->txq_count; k++)
{
volatile uint8_t *base = dev_interface->regs+dev_interface->tx_queue_offset+k*dev_interface->tx_queue_stride;
volatile uint8_t *base = dev_interface->regs+dev_interface->txq_offset+k*dev_interface->txq_stride;

uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_QUEUE_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_QUEUE_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG) & MQNIC_QUEUE_ACTIVE_MASK) != 0;
@ -397,30 +397,30 @@ int main(int argc, char *argv[])
printf("TXQ %4d 0x%016lx %d %d %2d %4d %6d %6d %6d\n", k, base_addr, active, log_desc_block_size, log_queue_size, cpl_queue_index, head_ptr, tail_ptr, occupancy);
}

printf("TX completion queue info\n");
printf("TX CQ info\n");
printf(" Queue Base Address E LS A C Int Head Tail Len\n");
for (int k = 0; k < dev_interface->tx_queue_count; k++)
for (int k = 0; k < dev_interface->tx_cq_count; k++)
{
volatile uint8_t *base = dev_interface->regs+dev_interface->tx_cpl_queue_offset+k*dev_interface->tx_cpl_queue_stride;
volatile uint8_t *base = dev_interface->regs+dev_interface->tx_cq_offset+k*dev_interface->tx_cq_stride;

uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_CPL_QUEUE_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_CPL_QUEUE_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG) & MQNIC_CPL_QUEUE_ACTIVE_MASK) != 0;
uint8_t log_queue_size = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG) & 0xff;
uint8_t armed = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & MQNIC_CPL_QUEUE_ARM_MASK) != 0;
uint8_t continuous = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & MQNIC_CPL_QUEUE_CONT_MASK) != 0;
uint32_t interrupt_index = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & 0xffff;
uint32_t head_ptr = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_HEAD_PTR_REG);
uint32_t tail_ptr = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_TAIL_PTR_REG);
uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_CQ_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_CQ_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_CQ_ACTIVE_LOG_SIZE_REG) & MQNIC_CQ_ACTIVE_MASK) != 0;
uint8_t log_queue_size = mqnic_reg_read32(base, MQNIC_CQ_ACTIVE_LOG_SIZE_REG) & 0xff;
uint8_t armed = (mqnic_reg_read32(base, MQNIC_CQ_INTERRUPT_INDEX_REG) & MQNIC_CQ_ARM_MASK) != 0;
uint8_t continuous = (mqnic_reg_read32(base, MQNIC_CQ_INTERRUPT_INDEX_REG) & MQNIC_CQ_CONT_MASK) != 0;
uint32_t interrupt_index = mqnic_reg_read32(base, MQNIC_CQ_INTERRUPT_INDEX_REG) & 0xffff;
uint32_t head_ptr = mqnic_reg_read32(base, MQNIC_CQ_HEAD_PTR_REG);
uint32_t tail_ptr = mqnic_reg_read32(base, MQNIC_CQ_TAIL_PTR_REG);
uint32_t occupancy = (head_ptr - tail_ptr) & 0xffff;

printf("TX CQ %4d 0x%016lx %d %2d %d %d %4d %6d %6d %6d\n", k, base_addr, active, log_queue_size, armed, continuous, interrupt_index, head_ptr, tail_ptr, occupancy);
}

printf("RX queue info\n");
printf("RXQ info\n");
printf(" Queue Base Address E B LS CPL Head Tail Len\n");
for (int k = 0; k < dev_interface->rx_queue_count; k++)
for (int k = 0; k < dev_interface->rxq_count; k++)
{
volatile uint8_t *base = dev_interface->regs+dev_interface->rx_queue_offset+k*dev_interface->rx_queue_stride;
volatile uint8_t *base = dev_interface->regs+dev_interface->rxq_offset+k*dev_interface->rxq_stride;

uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_QUEUE_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_QUEUE_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG) & MQNIC_QUEUE_ACTIVE_MASK) != 0;
@ -434,39 +434,39 @@ int main(int argc, char *argv[])
printf("RXQ %4d 0x%016lx %d %d %2d %4d %6d %6d %6d\n", k, base_addr, active, log_desc_block_size, log_queue_size, cpl_queue_index, head_ptr, tail_ptr, occupancy);
}

printf("RX completion queue info\n");
printf("RX CQ info\n");
printf(" Queue Base Address E LS A C Int Head Tail Len\n");
for (int k = 0; k < dev_interface->rx_queue_count; k++)
for (int k = 0; k < dev_interface->rx_cq_count; k++)
{
volatile uint8_t *base = dev_interface->regs+dev_interface->rx_cpl_queue_offset+k*dev_interface->rx_cpl_queue_stride;
volatile uint8_t *base = dev_interface->regs+dev_interface->rx_cq_offset+k*dev_interface->rx_cq_stride;

uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_CPL_QUEUE_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_CPL_QUEUE_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG) & MQNIC_CPL_QUEUE_ACTIVE_MASK) != 0;
uint8_t log_queue_size = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG) & 0xff;
uint8_t armed = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & MQNIC_CPL_QUEUE_ARM_MASK) != 0;
uint8_t continuous = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & MQNIC_CPL_QUEUE_CONT_MASK) != 0;
uint32_t interrupt_index = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & 0xffff;
uint32_t head_ptr = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_HEAD_PTR_REG);
uint32_t tail_ptr = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_TAIL_PTR_REG);
uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_CQ_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_CQ_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_CQ_ACTIVE_LOG_SIZE_REG) & MQNIC_CQ_ACTIVE_MASK) != 0;
uint8_t log_queue_size = mqnic_reg_read32(base, MQNIC_CQ_ACTIVE_LOG_SIZE_REG) & 0xff;
uint8_t armed = (mqnic_reg_read32(base, MQNIC_CQ_INTERRUPT_INDEX_REG) & MQNIC_CQ_ARM_MASK) != 0;
uint8_t continuous = (mqnic_reg_read32(base, MQNIC_CQ_INTERRUPT_INDEX_REG) & MQNIC_CQ_CONT_MASK) != 0;
uint32_t interrupt_index = mqnic_reg_read32(base, MQNIC_CQ_INTERRUPT_INDEX_REG) & 0xffff;
uint32_t head_ptr = mqnic_reg_read32(base, MQNIC_CQ_HEAD_PTR_REG);
uint32_t tail_ptr = mqnic_reg_read32(base, MQNIC_CQ_TAIL_PTR_REG);
uint32_t occupancy = (head_ptr - tail_ptr) & 0xffff;

printf("RX CQ %4d 0x%016lx %d %2d %d %d %4d %6d %6d %6d\n", k, base_addr, active, log_queue_size, armed, continuous, interrupt_index, head_ptr, tail_ptr, occupancy);
}

printf("Event queue info\n");
printf("EQ info\n");
printf(" Queue Base Address E LS A C Int Head Tail Len\n");
for (int k = 0; k < dev_interface->event_queue_count; k++)
for (int k = 0; k < dev_interface->eq_count; k++)
{
volatile uint8_t *base = dev_interface->regs+dev_interface->event_queue_offset+k*dev_interface->event_queue_stride;
volatile uint8_t *base = dev_interface->regs+dev_interface->eq_offset+k*dev_interface->eq_stride;

uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_CPL_QUEUE_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_CPL_QUEUE_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG) & MQNIC_CPL_QUEUE_ACTIVE_MASK) != 0;
uint8_t log_queue_size = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG) & 0xff;
uint8_t armed = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & MQNIC_CPL_QUEUE_ARM_MASK) != 0;
uint8_t continuous = (mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & MQNIC_CPL_QUEUE_CONT_MASK) != 0;
uint32_t interrupt_index = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG) & 0xffff;
uint32_t head_ptr = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_HEAD_PTR_REG);
uint32_t tail_ptr = mqnic_reg_read32(base, MQNIC_CPL_QUEUE_TAIL_PTR_REG);
uint64_t base_addr = (uint64_t)mqnic_reg_read32(base, MQNIC_EQ_BASE_ADDR_REG) + ((uint64_t)mqnic_reg_read32(base, MQNIC_EQ_BASE_ADDR_REG+4) << 32);
uint8_t active = (mqnic_reg_read32(base, MQNIC_EQ_ACTIVE_LOG_SIZE_REG) & MQNIC_EQ_ACTIVE_MASK) != 0;
uint8_t log_queue_size = mqnic_reg_read32(base, MQNIC_EQ_ACTIVE_LOG_SIZE_REG) & 0xff;
uint8_t armed = (mqnic_reg_read32(base, MQNIC_EQ_INTERRUPT_INDEX_REG) & MQNIC_EQ_ARM_MASK) != 0;
uint8_t continuous = (mqnic_reg_read32(base, MQNIC_EQ_INTERRUPT_INDEX_REG) & MQNIC_EQ_CONT_MASK) != 0;
uint32_t interrupt_index = mqnic_reg_read32(base, MQNIC_EQ_INTERRUPT_INDEX_REG) & 0xffff;
uint32_t head_ptr = mqnic_reg_read32(base, MQNIC_EQ_HEAD_PTR_REG);
uint32_t tail_ptr = mqnic_reg_read32(base, MQNIC_EQ_TAIL_PTR_REG);
uint32_t occupancy = (head_ptr - tail_ptr) & 0xffff;

printf("EQ %4d 0x%016lx %d %2d %d %d %4d %6d %6d %6d\n", k, base_addr, active, log_queue_size, armed, continuous, interrupt_index, head_ptr, tail_ptr, occupancy);
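Occupancy throughout the dump is computed as a wrap-safe difference of free-running 16-bit pointers:

uint32_t occupancy = (head_ptr - tail_ptr) & 0xffff;

For example, head_ptr = 0x0005 just after tail_ptr wrapped at 0xfffb still yields (0x0005 - 0xfffb) & 0xffff = 10 entries in flight.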
@ -475,7 +475,7 @@ int main(int argc, char *argv[])
for (int k = 0; k < dev_sched_block->sched_count; k++)
{
printf("Scheduler block %d scheduler %d\n", sched_block, k);
for (int l = 0; l < dev_interface->tx_queue_count; l++)
for (int l = 0; l < dev_interface->txq_count; l++)
{
printf("Sched %2d queue %4d state: 0x%08x\n", k, l, mqnic_reg_read32(dev_sched_block->sched[k]->regs, l*4));
}