mirror of https://github.com/corundum/corundum.git

Expose port and scheduler block counts in IF control block; update driver model, driver, and userspace tools to handle scheduler blocks separately from ports

commit cbd9d0dfc6 (parent 09128df360)
@@ -4,7 +4,7 @@
Interface control register block
================================

The interface control register block has a header with type 0x0000C001, version 0x00000200, and contains several interface-level control registers.
The interface control register block has a header with type 0x0000C001, version 0x00000300, and contains several interface-level control registers.

.. table::

@@ -13,21 +13,29 @@ The interface control register block has a header with type 0x0000C001, version
======== ============= ====== ====== ====== ====== =============
RBB+0x00 Type Vendor ID Type RO 0x0000C001
-------- ------------- -------------- -------------- -------------
RBB+0x04 Version Major Minor Patch Meta RO 0x00000200
RBB+0x04 Version Major Minor Patch Meta RO 0x00000300
-------- ------------- ------ ------ ------ ------ -------------
RBB+0x08 Next pointer Pointer to next register block RO -
-------- ------------- ------------------------------ -------------
RBB+0x0C Features Interface feature bits RO -
-------- ------------- ------------------------------ -------------
RBB+0x10 Max TX MTU Max TX MTU RO -
RBB+0x10 Port count Port count RO -
-------- ------------- ------------------------------ -------------
RBB+0x14 Max RX MTU Max RX MTU RO -
RBB+0x14 Sched count Scheduler block count RO -
-------- ------------- ------------------------------ -------------
RBB+0x18 TX MTU TX MTU RW -
RBB+0x18 - - RO -
-------- ------------- ------------------------------ -------------
RBB+0x1C RX MTU RX MTU RW -
RBB+0x1C - - RO -
-------- ------------- ------------------------------ -------------
RBB+0x20 RSS mask RSS mask RW 0x00000000
RBB+0x20 Max TX MTU Max TX MTU RO -
-------- ------------- ------------------------------ -------------
RBB+0x24 Max RX MTU Max RX MTU RO -
-------- ------------- ------------------------------ -------------
RBB+0x28 TX MTU TX MTU RW -
-------- ------------- ------------------------------ -------------
RBB+0x2C RX MTU RX MTU RW -
-------- ------------- ------------------------------ -------------
RBB+0x30 RSS mask RSS mask RW 0x00000000
======== ============= ============================== =============

See :ref:`rb_overview` for definitions of the standard register block header fields.
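
As a companion to the table above, a minimal sketch (not part of this commit) of how software can locate this block and read the new count fields; the register-block lookup, the read_dword helper, and the constant names are taken from the mqnic.py driver model updated later in this diff, and the interface.reg_blocks attribute name is assumed from that model::

    # locate the interface control block by type/version, then read the new fields
    if_ctrl_rb = interface.reg_blocks.find(MQNIC_RB_IF_CTRL_TYPE, MQNIC_RB_IF_CTRL_VER)

    port_count = await if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_PORT_COUNT)    # RBB+0x10
    sched_count = await if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_SCHED_COUNT)  # RBB+0x14
    max_tx_mtu = await if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU)    # RBB+0x20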
@@ -58,6 +66,30 @@ See :ref:`rb_overview` for definitions of the standard register block header fie
10 RX flow hash offloading
=== =======================

.. object:: Port count

The port count field contains the number of ports associated with the interface, as configured via Verilog parameters during synthesis.

.. table::

======== ====== ====== ====== ====== =============
Address 31..24 23..16 15..8 7..0 Reset value
======== ====== ====== ====== ====== =============
RBB+0x10 Port count RO -
======== ============================== =============
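
The driver uses this value to size per-port state; the port register blocks themselves (type 0x0000C002 in the overview table) are still enumerated from the register block list. A hedged sketch in the style of the mqnic.py driver model — the 0x0000C002/0x00000200 type and version come from the overview table, and the find(..., index=k) signature mirrors what the model uses for scheduler blocks::

    port_count = await if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_PORT_COUNT)
    for k in range(port_count):
        # one port register block is expected per reported port
        port_rb = interface.reg_blocks.find(0x0000C002, 0x00000200, index=k)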

.. object:: Scheduler block count

The scheduler block count field contains the number of scheduler blocks associated with the interface, as configured via Verilog parameters during synthesis.

.. table::

======== ====== ====== ====== ====== =============
Address 31..24 23..16 15..8 7..0 Reset value
======== ====== ====== ====== ====== =============
RBB+0x14 Scheduler block count RO -
======== ============================== =============
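
This count drives scheduler block enumeration: instead of probing until a lookup fails, the driver model in this commit reads the count and then fetches exactly that many scheduler block register blocks. A condensed sketch of that loop, using names from mqnic.py::

    sched_block_count = await if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_SCHED_COUNT)
    for k in range(sched_block_count):
        # one scheduler block register block (type 0x0000C003) per reported block
        rb = interface.reg_blocks.find(MQNIC_RB_SCHED_BLOCK_TYPE, MQNIC_RB_SCHED_BLOCK_VER, index=k)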

.. object:: Max TX MTU

The max TX MTU field contains the maximum frame size on the transmit path, as configured via Verilog parameters during synthesis.

@@ -67,7 +99,7 @@ See :ref:`rb_overview` for definitions of the standard register block header fie
======== ====== ====== ====== ====== =============
Address 31..24 23..16 15..8 7..0 Reset value
======== ====== ====== ====== ====== =============
RBB+0x10 Max TX MTU RO -
RBB+0x20 Max TX MTU RO -
======== ============================== =============

.. object:: Max RX MTU

@@ -79,7 +111,7 @@ See :ref:`rb_overview` for definitions of the standard register block header fie
======== ====== ====== ====== ====== =============
Address 31..24 23..16 15..8 7..0 Reset value
======== ====== ====== ====== ====== =============
RBB+0x14 Max RX MTU RO -
RBB+0x24 Max RX MTU RO -
======== ============================== =============

.. object:: TX MTU

@@ -91,7 +123,7 @@ See :ref:`rb_overview` for definitions of the standard register block header fie
======== ====== ====== ====== ====== =============
Address 31..24 23..16 15..8 7..0 Reset value
======== ====== ====== ====== ====== =============
RBB+0x18 TX MTU RW -
RBB+0x28 TX MTU RW -
======== ============================== =============

.. object:: RX MTU

@@ -103,7 +135,7 @@ See :ref:`rb_overview` for definitions of the standard register block header fie
======== ====== ====== ====== ====== =============
Address 31..24 23..16 15..8 7..0 Reset value
======== ====== ====== ====== ====== =============
RBB+0x1C RX MTU RW -
RBB+0x2C RX MTU RW -
======== ============================== =============

.. object:: RSS mask

@@ -115,5 +147,5 @@ See :ref:`rb_overview` for definitions of the standard register block header fie
======== ====== ====== ====== ====== =============
Address 31..24 23..16 15..8 7..0 Reset value
======== ====== ====== ====== ====== =============
RBB+0x20 RSS mask RW 0x00000000
RBB+0x30 RSS mask RW 0x00000000
======== ============================== =============
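
Because the MTU and RSS mask registers moved down by 0x10 (0x18/0x1C/0x20 to 0x28/0x2C/0x30), software keyed to the old offsets must switch to the new constants. A hedged sketch using the updated mqnic.py names; write_dword on the interface control register block is assumed to behave like the read_dword calls shown in this commit::

    await if_ctrl_rb.write_dword(MQNIC_RB_IF_CTRL_REG_TX_MTU, 1518)          # now RBB+0x28
    await if_ctrl_rb.write_dword(MQNIC_RB_IF_CTRL_REG_RX_MTU, 1518)          # now RBB+0x2C
    await if_ctrl_rb.write_dword(MQNIC_RB_IF_CTRL_REG_RSS_MASK, 0x0000000f)  # now RBB+0x30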

@@ -63,7 +63,7 @@ The NIC register space is constructed from a linked list of register blocks. Ea
0x00000000 \- :ref:`rb_null`
0xFFFFFFFF 0x00000100 :ref:`rb_fw_id`
0x0000C000 0x00000100 :ref:`rb_if`
0x0000C001 0x00000200 :ref:`rb_if_ctrl`
0x0000C001 0x00000300 :ref:`rb_if_ctrl`
0x0000C002 0x00000200 port
0x0000C003 0x00000100 :ref:`rb_sched_block`
0x0000C004 0x00000100 stats
@@ -379,9 +379,9 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
for interface in tb.driver.interfaces:
await interface.ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(interface.tx_queue_count):
await interface.ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -509,16 +509,16 @@ async def run_test_nic(dut):

tb.loopback_enable = False

if len(tb.driver.interfaces[0].ports) > 1:
tb.log.info("All interface 0 ports")
if len(tb.driver.interfaces[0].sched_blocks) > 1:
tb.log.info("All interface 0 scheduler blocks")

for port in tb.driver.interfaces[0].ports:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(port.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].ports) == port.index:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
for block in tb.driver.interfaces[0].sched_blocks:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(block.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
else:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)

count = 64

@@ -527,7 +527,7 @@ async def run_test_nic(dut):
tb.loopback_enable = True

for k, p in enumerate(pkts):
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].ports))
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].sched_blocks))

for k in range(count):
pkt = await tb.driver.interfaces[0].recv()

@@ -538,8 +538,8 @@ async def run_test_nic(dut):

tb.loopback_enable = False

for port in tb.driver.interfaces[0].ports[1:]:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)
for block in tb.driver.interfaces[0].sched_blocks[1:]:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)

tb.log.info("Read statistics counters")
@@ -944,7 +944,7 @@ always @(posedge clk) begin
// Interface control
RBB+8'h18: tx_mtu_reg <= ctrl_reg_wr_data; // IF ctrl: TX MTU
RBB+8'h1C: rx_mtu_reg <= ctrl_reg_wr_data; // IF ctrl: RX MTU
RBB+8'h20: rss_mask_reg <= ctrl_reg_wr_data; // IF ctrl: RSS mask
RBB+8'h30: rss_mask_reg <= ctrl_reg_wr_data; // IF ctrl: RSS mask
default: ctrl_reg_wr_ack_reg <= 1'b0;
endcase
end

@@ -955,7 +955,7 @@ always @(posedge clk) begin
case ({ctrl_reg_rd_addr >> 2, 2'b00})
// Interface control
RBB+8'h00: ctrl_reg_rd_data_reg <= 32'h0000C001; // IF ctrl: Type
RBB+8'h04: ctrl_reg_rd_data_reg <= 32'h00000200; // IF ctrl: Version
RBB+8'h04: ctrl_reg_rd_data_reg <= 32'h00000300; // IF ctrl: Version
RBB+8'h08: ctrl_reg_rd_data_reg <= RB_BASE_ADDR+8'h40; // IF ctrl: Next header
RBB+8'h0C: begin
// IF ctrl: features

@@ -965,11 +965,13 @@ always @(posedge clk) begin
ctrl_reg_rd_data_reg[9] <= RX_CHECKSUM_ENABLE;
ctrl_reg_rd_data_reg[10] <= RX_HASH_ENABLE;
end
RBB+8'h10: ctrl_reg_rd_data_reg <= MAX_TX_SIZE; // IF ctrl: Max TX MTU
RBB+8'h14: ctrl_reg_rd_data_reg <= MAX_RX_SIZE; // IF ctrl: Max RX MTU
RBB+8'h18: ctrl_reg_rd_data_reg <= tx_mtu_reg; // IF ctrl: TX MTU
RBB+8'h1C: ctrl_reg_rd_data_reg <= rx_mtu_reg; // IF ctrl: RX MTU
RBB+8'h20: ctrl_reg_rd_data_reg <= rss_mask_reg; // IF ctrl: RSS mask
RBB+8'h10: ctrl_reg_rd_data_reg <= PORTS; // IF ctrl: Port count
RBB+8'h14: ctrl_reg_rd_data_reg <= SCHEDULERS; // IF ctrl: Scheduler count
RBB+8'h20: ctrl_reg_rd_data_reg <= MAX_TX_SIZE; // IF ctrl: Max TX MTU
RBB+8'h24: ctrl_reg_rd_data_reg <= MAX_RX_SIZE; // IF ctrl: Max RX MTU
RBB+8'h28: ctrl_reg_rd_data_reg <= tx_mtu_reg; // IF ctrl: TX MTU
RBB+8'h2C: ctrl_reg_rd_data_reg <= rx_mtu_reg; // IF ctrl: RX MTU
RBB+8'h30: ctrl_reg_rd_data_reg <= rss_mask_reg; // IF ctrl: RSS mask
// Queue manager (Event)
RBB+8'h40: ctrl_reg_rd_data_reg <= 32'h0000C010; // Event QM: Type
RBB+8'h44: ctrl_reg_rd_data_reg <= 32'h00000100; // Event QM: Version
@@ -153,13 +153,15 @@ MQNIC_RB_IF_REG_STRIDE = 0x14
MQNIC_RB_IF_REG_CSR_OFFSET = 0x18

MQNIC_RB_IF_CTRL_TYPE = 0x0000C001
MQNIC_RB_IF_CTRL_VER = 0x00000200
MQNIC_RB_IF_CTRL_VER = 0x00000300
MQNIC_RB_IF_CTRL_REG_FEATURES = 0x0C
MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU = 0x10
MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU = 0x14
MQNIC_RB_IF_CTRL_REG_TX_MTU = 0x18
MQNIC_RB_IF_CTRL_REG_RX_MTU = 0x1C
MQNIC_RB_IF_CTRL_REG_RSS_MASK = 0x20
MQNIC_RB_IF_CTRL_REG_PORT_COUNT = 0x10
MQNIC_RB_IF_CTRL_REG_SCHED_COUNT = 0x14
MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU = 0x20
MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU = 0x24
MQNIC_RB_IF_CTRL_REG_TX_MTU = 0x28
MQNIC_RB_IF_CTRL_REG_RX_MTU = 0x2C
MQNIC_RB_IF_CTRL_REG_RSS_MASK = 0x30

MQNIC_IF_FEATURE_RSS = (1 << 0)
MQNIC_IF_FEATURE_PTP_TS = (1 << 4)
@@ -744,7 +746,7 @@ class SchedulerControlTdma(BaseScheduler):
self.hw_regs = self.rb.parent.create_window(offset)


class Port:
class SchedulerBlock:
def __init__(self, interface, index, rb):
self.interface = interface
self.log = interface.log

@@ -823,6 +825,7 @@ class Interface:
self.rx_cpl_queue_stride = None

self.port_count = None
self.sched_block_count = None

self.event_queues = []

@@ -830,7 +833,7 @@ class Interface:
self.tx_cpl_queues = []
self.rx_queues = []
self.rx_cpl_queues = []
self.ports = []
self.sched_blocks = []

self.interrupt_running = False
self.interrupt_pending = 0

@@ -847,10 +850,14 @@ class Interface:
self.if_ctrl_rb = self.reg_blocks.find(MQNIC_RB_IF_CTRL_TYPE, MQNIC_RB_IF_CTRL_VER)

self.if_features = await self.if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_FEATURES)
self.port_count = await self.if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_PORT_COUNT)
self.sched_block_count = await self.if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_SCHED_COUNT)
self.max_tx_mtu = await self.if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU)
self.max_rx_mtu = await self.if_ctrl_rb.read_dword(MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU)

self.log.info("IF features: 0x%08x", self.if_features)
self.log.info("Port count: %d", self.port_count)
self.log.info("Scheduler block count: %d", self.sched_block_count)
self.log.info("Max TX MTU: %d", self.max_tx_mtu)
self.log.info("Max RX MTU: %d", self.max_rx_mtu)

@@ -954,19 +961,14 @@ class Interface:
await q.init()
self.rx_cpl_queues.append(q)

self.port_count = 0
while True:
rb = self.reg_blocks.find(MQNIC_RB_SCHED_BLOCK_TYPE, MQNIC_RB_SCHED_BLOCK_VER, index=self.port_count)
if not rb:
break
for k in range(self.sched_block_count):
rb = self.reg_blocks.find(MQNIC_RB_SCHED_BLOCK_TYPE, MQNIC_RB_SCHED_BLOCK_VER, index=k)

p = Port(self, self.port_count, rb)
await p.init()
self.ports.append(p)
s = SchedulerBlock(self, k, rb)
await s.init()
self.sched_blocks.append(s)

self.port_count += 1

self.log.info("Port count: %d", self.port_count)
assert self.sched_block_count == len(self.sched_blocks)

# wait for all writes to complete
await self.hw_regs.read_dword(0)
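
The testbench updates that follow are all the same mechanical change: tests that previously drove interface.ports[...] for scheduling now drive interface.sched_blocks[...]. A condensed sketch of the recurring "enable queues" pattern (assuming the cocotb testbench object tb and the mqnic constants above):

    for interface in tb.driver.interfaces:
        block = interface.sched_blocks[0]
        # enable the round-robin scheduler, then enable and pack all TX queues onto it
        await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
        for k in range(interface.tx_queue_count):
            await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)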

@@ -193,9 +193,9 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
for interface in tb.driver.interfaces:
await interface.ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(interface.tx_queue_count):
await interface.ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -323,16 +323,16 @@ async def run_test_nic(dut):

tb.loopback_enable = False

if len(tb.driver.interfaces[0].ports) > 1:
tb.log.info("All interface 0 ports")
if len(tb.driver.interfaces[0].sched_blocks) > 1:
tb.log.info("All interface 0 scheduler blocks")

for port in tb.driver.interfaces[0].ports:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(port.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].ports) == port.index:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
for block in tb.driver.interfaces[0].sched_blocks:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(block.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
else:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)

count = 64

@@ -341,7 +341,7 @@ async def run_test_nic(dut):
tb.loopback_enable = True

for k, p in enumerate(pkts):
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].ports))
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].sched_blocks))

for k in range(count):
pkt = await tb.driver.interfaces[0].recv()

@@ -352,8 +352,8 @@ async def run_test_nic(dut):

tb.loopback_enable = False

for port in tb.driver.interfaces[0].ports[1:]:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)
for block in tb.driver.interfaces[0].sched_blocks[1:]:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)

tb.log.info("Read statistics counters")
@@ -304,9 +304,9 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
for interface in tb.driver.interfaces:
await interface.ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(interface.tx_queue_count):
await interface.ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -434,16 +434,16 @@ async def run_test_nic(dut):

tb.loopback_enable = False

if len(tb.driver.interfaces[0].ports) > 1:
tb.log.info("All interface 0 ports")
if len(tb.driver.interfaces[0].sched_blocks) > 1:
tb.log.info("All interface 0 scheduler blocks")

for port in tb.driver.interfaces[0].ports:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(port.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].ports) == port.index:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
for block in tb.driver.interfaces[0].sched_blocks:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(block.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
else:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)

count = 64

@@ -452,7 +452,7 @@ async def run_test_nic(dut):
tb.loopback_enable = True

for k, p in enumerate(pkts):
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].ports))
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].sched_blocks))

for k in range(count):
pkt = await tb.driver.interfaces[0].recv()

@@ -463,8 +463,8 @@ async def run_test_nic(dut):

tb.loopback_enable = False

for port in tb.driver.interfaces[0].ports[1:]:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)
for block in tb.driver.interfaces[0].sched_blocks[1:]:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)

tb.log.info("Read statistics counters")
@@ -379,9 +379,9 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
for interface in tb.driver.interfaces:
await interface.ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(interface.tx_queue_count):
await interface.ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -509,16 +509,16 @@ async def run_test_nic(dut):

tb.loopback_enable = False

if len(tb.driver.interfaces[0].ports) > 1:
tb.log.info("All interface 0 ports")
if len(tb.driver.interfaces[0].sched_blocks) > 1:
tb.log.info("All interface 0 scheduler blocks")

for port in tb.driver.interfaces[0].ports:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(port.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].ports) == port.index:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
for block in tb.driver.interfaces[0].sched_blocks:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(block.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
else:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)

count = 64

@@ -527,7 +527,7 @@ async def run_test_nic(dut):
tb.loopback_enable = True

for k, p in enumerate(pkts):
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].ports))
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].sched_blocks))

for k in range(count):
pkt = await tb.driver.interfaces[0].recv()

@@ -538,8 +538,8 @@ async def run_test_nic(dut):

tb.loopback_enable = False

for port in tb.driver.interfaces[0].ports[1:]:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)
for block in tb.driver.interfaces[0].sched_blocks[1:]:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)

tb.log.info("Read statistics counters")
@@ -379,9 +379,9 @@ async def run_test_nic(dut):
# enable queues
tb.log.info("Enable queues")
for interface in tb.driver.interfaces:
await interface.ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(interface.tx_queue_count):
await interface.ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -509,16 +509,16 @@ async def run_test_nic(dut):

tb.loopback_enable = False

if len(tb.driver.interfaces[0].ports) > 1:
tb.log.info("All interface 0 ports")
if len(tb.driver.interfaces[0].sched_blocks) > 1:
tb.log.info("All interface 0 scheduler blocks")

for port in tb.driver.interfaces[0].ports:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(port.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].ports) == port.index:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
for block in tb.driver.interfaces[0].sched_blocks:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(block.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
else:
await port.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)

count = 64

@@ -527,7 +527,7 @@ async def run_test_nic(dut):
tb.loopback_enable = True

for k, p in enumerate(pkts):
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].ports))
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].sched_blocks))

for k in range(count):
pkt = await tb.driver.interfaces[0].recv()

@@ -538,8 +538,8 @@ async def run_test_nic(dut):

tb.loopback_enable = False

for port in tb.driver.interfaces[0].ports[1:]:
await port.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)
for block in tb.driver.interfaces[0].sched_blocks[1:]:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)

await Timer(1000, 'ns')

@@ -552,7 +552,7 @@ async def run_test_nic(dut):
tb.loopback_enable = True

# configure TDMA scheduler
tdma_sch_rb = tb.driver.interfaces[0].ports[0].reg_blocks.find(mqnic.MQNIC_RB_TDMA_SCH_TYPE, mqnic.MQNIC_RB_TDMA_SCH_VER, 0)
tdma_sch_rb = tb.driver.interfaces[0].sched_blocks[0].reg_blocks.find(mqnic.MQNIC_RB_TDMA_SCH_TYPE, mqnic.MQNIC_RB_TDMA_SCH_VER, 0)
await tdma_sch_rb.write_dword(mqnic.MQNIC_RB_TDMA_SCH_REG_SCH_PERIOD_FNS, 0)
await tdma_sch_rb.write_dword(mqnic.MQNIC_RB_TDMA_SCH_REG_SCH_PERIOD_NS, 40000)
await tdma_sch_rb.write_dword(mqnic.MQNIC_RB_TDMA_SCH_REG_SCH_PERIOD_SEC_L, 0)

@@ -568,15 +568,15 @@ async def run_test_nic(dut):
await tdma_sch_rb.write_dword(mqnic.MQNIC_RB_TDMA_SCH_REG_CTRL, 0x00000001)

# enable queues with global enable off
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000001)

# configure slots
await tb.driver.interfaces[0].ports[0].schedulers[1].hw_regs.write_dword(8*0, 0x00000001)
await tb.driver.interfaces[0].ports[0].schedulers[1].hw_regs.write_dword(8*1, 0x00000002)
await tb.driver.interfaces[0].ports[0].schedulers[1].hw_regs.write_dword(8*2, 0x00000004)
await tb.driver.interfaces[0].ports[0].schedulers[1].hw_regs.write_dword(8*3, 0x00000008)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[1].hw_regs.write_dword(8*0, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[1].hw_regs.write_dword(8*1, 0x00000002)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[1].hw_regs.write_dword(8*2, 0x00000004)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[1].hw_regs.write_dword(8*3, 0x00000008)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)
@@ -376,9 +376,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -448,9 +448,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -376,9 +376,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -448,9 +448,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -376,9 +376,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -448,9 +448,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -365,9 +365,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -437,9 +437,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)
@@ -338,9 +338,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -374,9 +374,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -350,9 +350,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -359,9 +359,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -360,9 +360,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -357,9 +357,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -381,9 +381,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -380,9 +380,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -452,9 +452,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -374,9 +374,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -446,9 +446,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -353,9 +353,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -378,9 +378,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)

@@ -450,9 +450,9 @@ async def run_test_nic(dut):

# enable queues
tb.log.info("Enable queues")
await tb.driver.interfaces[0].ports[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.driver.interfaces[0].ports[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
await tb.driver.interfaces[0].sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)

# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)
@@ -8,7 +8,7 @@ mqnic-y += mqnic_irq.o
mqnic-y += mqnic_dev.o
mqnic-y += mqnic_if.o
mqnic-y += mqnic_netdev.o
mqnic-y += mqnic_port.o
mqnic-y += mqnic_sched_block.o
mqnic-y += mqnic_scheduler.o
mqnic-y += mqnic_ptp.o
mqnic-y += mqnic_i2c.o
@@ -303,7 +303,7 @@ struct mqnic_eq_ring {
struct mqnic_sched {
struct device *dev;
struct mqnic_if *interface;
struct mqnic_port *port;
struct mqnic_sched_block *sched_block;

struct reg_block *rb;

@@ -317,7 +317,7 @@ struct mqnic_sched {
u8 __iomem *hw_addr;
};

struct mqnic_port {
struct mqnic_sched_block {
struct device *dev;
struct mqnic_if *interface;
@@ -381,9 +381,9 @@ struct mqnic_if {
struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];

u32 port_count;
u32 port_offset;
u32 port_stride;
struct mqnic_port *port[MQNIC_MAX_PORTS];
u32 sched_block_count;

struct mqnic_sched_block *sched_block[MQNIC_MAX_PORTS];

u32 max_desc_block_size;

@@ -426,8 +426,8 @@ struct mqnic_priv {
u32 rx_cpl_queue_count;
struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];

u32 port_count;
struct mqnic_port *port[MQNIC_MAX_PORTS];
u32 sched_block_count;
struct mqnic_sched_block *sched_block[MQNIC_MAX_PORTS];

u32 max_desc_block_size;
@@ -467,15 +467,15 @@ int mqnic_create_netdev(struct mqnic_if *interface, struct net_device **ndev_ptr
int index, int dev_port);
void mqnic_destroy_netdev(struct net_device **ndev_ptr);

// mqnic_port.c
int mqnic_create_port(struct mqnic_if *interface, struct mqnic_port **port_ptr,
// mqnic_sched_block.c
int mqnic_create_sched_block(struct mqnic_if *interface, struct mqnic_sched_block **block_ptr,
int index, struct reg_block *rb);
void mqnic_destroy_port(struct mqnic_port **port_ptr);
int mqnic_activate_port(struct mqnic_port *port);
void mqnic_deactivate_port(struct mqnic_port *port);
void mqnic_destroy_sched_block(struct mqnic_sched_block **block_ptr);
int mqnic_activate_sched_block(struct mqnic_sched_block *block);
void mqnic_deactivate_sched_block(struct mqnic_sched_block *block);

// mqnic_scheduler.c
int mqnic_create_scheduler(struct mqnic_port *port, struct mqnic_sched **sched_ptr,
int mqnic_create_scheduler(struct mqnic_sched_block *block, struct mqnic_sched **sched_ptr,
int index, struct reg_block *rb);
void mqnic_destroy_scheduler(struct mqnic_sched **sched_ptr);
int mqnic_scheduler_enable(struct mqnic_sched *sched);
@@ -177,13 +177,15 @@
#define MQNIC_RB_IF_REG_CSR_OFFSET 0x18

#define MQNIC_RB_IF_CTRL_TYPE 0x0000C001
#define MQNIC_RB_IF_CTRL_VER 0x00000200
#define MQNIC_RB_IF_CTRL_VER 0x00000300
#define MQNIC_RB_IF_CTRL_REG_FEATURES 0x0C
#define MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU 0x10
#define MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU 0x14
#define MQNIC_RB_IF_CTRL_REG_TX_MTU 0x18
#define MQNIC_RB_IF_CTRL_REG_RX_MTU 0x1C
#define MQNIC_RB_IF_CTRL_REG_RSS_MASK 0x20
#define MQNIC_RB_IF_CTRL_REG_PORT_COUNT 0x10
#define MQNIC_RB_IF_CTRL_REG_SCHED_COUNT 0x14
#define MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU 0x20
#define MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU 0x24
#define MQNIC_RB_IF_CTRL_REG_TX_MTU 0x28
#define MQNIC_RB_IF_CTRL_REG_RX_MTU 0x2C
#define MQNIC_RB_IF_CTRL_REG_RSS_MASK 0x30

#define MQNIC_IF_FEATURE_RSS (1 << 0)
#define MQNIC_IF_FEATURE_PTP_TS (1 << 4)
@@ -82,10 +82,14 @@ int mqnic_create_interface(struct mqnic_dev *mdev, struct mqnic_if **interface_p
}

interface->if_features = ioread32(interface->if_ctrl_rb->regs + MQNIC_RB_IF_CTRL_REG_FEATURES);
interface->port_count = ioread32(interface->if_ctrl_rb->regs + MQNIC_RB_IF_CTRL_REG_PORT_COUNT);
interface->sched_block_count = ioread32(interface->if_ctrl_rb->regs + MQNIC_RB_IF_CTRL_REG_SCHED_COUNT);
interface->max_tx_mtu = ioread32(interface->if_ctrl_rb->regs + MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU);
interface->max_rx_mtu = ioread32(interface->if_ctrl_rb->regs + MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU);

dev_info(dev, "IF features: 0x%08x", interface->if_features);
dev_info(dev, "Port count: %d", interface->port_count);
dev_info(dev, "Scheduler block count: %d", interface->sched_block_count);
dev_info(dev, "Max TX MTU: %d", interface->max_tx_mtu);
dev_info(dev, "Max RX MTU: %d", interface->max_rx_mtu);
@@ -234,25 +238,22 @@ int mqnic_create_interface(struct mqnic_dev *mdev, struct mqnic_if **interface_p
goto fail;
}

// create ports
interface->port_count = 0;
while (interface->port_count < MQNIC_MAX_PORTS)
{
struct reg_block *sched_block_rb = find_reg_block(interface->rb_list, MQNIC_RB_SCHED_BLOCK_TYPE, MQNIC_RB_SCHED_BLOCK_VER, interface->port_count);
// create schedulers
for (k = 0; k < interface->sched_block_count; k++) {
struct reg_block *sched_block_rb = find_reg_block(interface->rb_list, MQNIC_RB_SCHED_BLOCK_TYPE, MQNIC_RB_SCHED_BLOCK_VER, k);

if (!sched_block_rb)
break;
if (!sched_block_rb) {
ret = -EIO;
dev_err(dev, "Scheduler block index %d not found", k);
goto fail;
}

ret = mqnic_create_port(interface, &interface->port[interface->port_count],
interface->port_count, sched_block_rb);
ret = mqnic_create_sched_block(interface, &interface->sched_block[k],
k, sched_block_rb);
if (ret)
goto fail;

interface->port_count++;
}

dev_info(dev, "Port count: %d", interface->port_count);

// create net_devices
interface->dev_port_base = mdev->dev_port_max;
interface->dev_port_max = mdev->dev_port_max;
@@ -302,10 +303,10 @@ void mqnic_destroy_interface(struct mqnic_if **interface_ptr)
if (interface->rx_cpl_ring[k])
mqnic_destroy_cq_ring(&interface->rx_cpl_ring[k]);

// free ports
for (k = 0; k < ARRAY_SIZE(interface->port); k++)
if (interface->port[k])
mqnic_destroy_port(&interface->port[k]);
// free schedulers
for (k = 0; k < ARRAY_SIZE(interface->sched_block); k++)
if (interface->sched_block[k])
mqnic_destroy_sched_block(&interface->sched_block[k]);

if (interface->rb_list)
free_reg_block_list(interface->rb_list);
@@ -88,8 +88,8 @@ static int mqnic_start_port(struct net_device *ndev)
// configure RSS
mqnic_interface_set_rss_mask(priv->interface, 0xffffffff);

// enable first port
mqnic_activate_port(priv->port[0]);
// enable first scheduler
mqnic_activate_sched_block(priv->sched_block[0]);

priv->port_up = true;

@@ -123,9 +123,9 @@ static int mqnic_stop_port(struct net_device *ndev)
priv->port_up = false;
spin_unlock_bh(&priv->stats_lock);

// disable ports
for (k = 0; k < priv->port_count; k++)
mqnic_deactivate_port(priv->port[k]);
// disable schedulers
for (k = 0; k < priv->sched_block_count; k++)
mqnic_deactivate_sched_block(priv->sched_block[k]);

// deactivate TX queues
for (k = 0; k < min(priv->tx_queue_count, priv->tx_cpl_queue_count); k++) {

@@ -405,9 +405,9 @@ int mqnic_create_netdev(struct mqnic_if *interface, struct net_device **ndev_ptr
for (k = 0; k < interface->rx_cpl_queue_count; k++)
priv->rx_cpl_ring[k] = interface->rx_cpl_ring[k];

priv->port_count = interface->port_count;
for (k = 0; k < interface->port_count; k++)
priv->port[k] = interface->port[k];
priv->sched_block_count = interface->sched_block_count;
for (k = 0; k < interface->sched_block_count; k++)
priv->sched_block[k] = interface->sched_block[k];

netif_set_real_num_tx_queues(ndev, priv->tx_queue_count);
netif_set_real_num_rx_queues(ndev, priv->rx_queue_count);
@ -35,105 +35,105 @@
|
||||
|
||||
#include "mqnic.h"
|
||||
|
||||
int mqnic_create_port(struct mqnic_if *interface, struct mqnic_port **port_ptr,
|
||||
int mqnic_create_sched_block(struct mqnic_if *interface, struct mqnic_sched_block **block_ptr,
|
||||
int index, struct reg_block *block_rb)
|
||||
{
|
||||
struct device *dev = interface->dev;
|
||||
struct mqnic_port *port;
|
||||
    struct mqnic_sched_block *block;
    struct reg_block *rb;
    u32 offset;
    int ret = 0;

    port = kzalloc(sizeof(*port), GFP_KERNEL);
    if (!port)
    block = kzalloc(sizeof(*block), GFP_KERNEL);
    if (!block)
        return -ENOMEM;

    *port_ptr = port;
    *block_ptr = block;

    port->dev = dev;
    port->interface = interface;
    block->dev = dev;
    block->interface = interface;

    port->index = index;
    block->index = index;

    port->tx_queue_count = interface->tx_queue_count;
    block->tx_queue_count = interface->tx_queue_count;

    port->block_rb = block_rb;
    block->block_rb = block_rb;

    offset = ioread32(block_rb->regs + MQNIC_RB_SCHED_BLOCK_REG_OFFSET);

    port->rb_list = enumerate_reg_block_list(interface->hw_addr, offset, interface->hw_regs_size - offset);
    block->rb_list = enumerate_reg_block_list(interface->hw_addr, offset, interface->hw_regs_size - offset);

    if (!port->rb_list) {
    if (!block->rb_list) {
        ret = -EIO;
        dev_err(dev, "Failed to enumerate blocks");
        goto fail;
    }

    dev_info(dev, "Port-level register blocks:");
    for (rb = port->rb_list; rb->type && rb->version; rb++)
    dev_info(dev, "Scheduler block-level register blocks:");
    for (rb = block->rb_list; rb->type && rb->version; rb++)
        dev_info(dev, " type 0x%08x (v %d.%d.%d.%d)", rb->type, rb->version >> 24,
                (rb->version >> 16) & 0xff, (rb->version >> 8) & 0xff, rb->version & 0xff);

    port->sched_count = 0;
    for (rb = port->rb_list; rb->type && rb->version; rb++) {
    block->sched_count = 0;
    for (rb = block->rb_list; rb->type && rb->version; rb++) {
        if (rb->type == MQNIC_RB_SCHED_RR_TYPE && rb->version == MQNIC_RB_SCHED_RR_VER) {
            ret = mqnic_create_scheduler(port, &port->sched[port->sched_count],
                    port->sched_count, rb);
            ret = mqnic_create_scheduler(block, &block->sched[block->sched_count],
                    block->sched_count, rb);

            if (ret)
                goto fail;

            port->sched_count++;
            block->sched_count++;
        }
    }

    dev_info(dev, "Scheduler count: %d", port->sched_count);
    dev_info(dev, "Scheduler count: %d", block->sched_count);

    mqnic_deactivate_port(port);
    mqnic_deactivate_sched_block(block);

    return 0;

fail:
    mqnic_destroy_port(port_ptr);
    mqnic_destroy_sched_block(block_ptr);
    return ret;
}

void mqnic_destroy_port(struct mqnic_port **port_ptr)
void mqnic_destroy_sched_block(struct mqnic_sched_block **block_ptr)
{
    struct mqnic_port *port = *port_ptr;
    struct mqnic_sched_block *block = *block_ptr;
    int k;

    mqnic_deactivate_port(port);
    mqnic_deactivate_sched_block(block);

    for (k = 0; k < ARRAY_SIZE(port->sched); k++)
        if (port->sched[k])
            mqnic_destroy_scheduler(&port->sched[k]);
    for (k = 0; k < ARRAY_SIZE(block->sched); k++)
        if (block->sched[k])
            mqnic_destroy_scheduler(&block->sched[k]);

    if (port->rb_list)
        free_reg_block_list(port->rb_list);
    if (block->rb_list)
        free_reg_block_list(block->rb_list);

    *port_ptr = NULL;
    kfree(port);
    *block_ptr = NULL;
    kfree(block);
}

int mqnic_activate_port(struct mqnic_port *port)
int mqnic_activate_sched_block(struct mqnic_sched_block *block)
{
    int k;

    // enable schedulers
    for (k = 0; k < ARRAY_SIZE(port->sched); k++)
        if (port->sched[k])
            mqnic_scheduler_enable(port->sched[k]);
    for (k = 0; k < ARRAY_SIZE(block->sched); k++)
        if (block->sched[k])
            mqnic_scheduler_enable(block->sched[k]);

    return 0;
}

void mqnic_deactivate_port(struct mqnic_port *port)
void mqnic_deactivate_sched_block(struct mqnic_sched_block *block)
{
    int k;

    // disable schedulers
    for (k = 0; k < ARRAY_SIZE(port->sched); k++)
        if (port->sched[k])
            mqnic_scheduler_disable(port->sched[k]);
    for (k = 0; k < ARRAY_SIZE(block->sched); k++)
        if (block->sched[k])
            mqnic_scheduler_disable(block->sched[k]);
}
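For orientation, the renamed kernel-side entry points above compose as shown in the sketch below. This is a minimal illustration, not part of the commit: the creation routine is assumed to be named mqnic_create_sched_block with the argument order implied by the function body shown above, and the calling context is hypothetical; activate, deactivate, and destroy are the functions visible in this diff.

    /* Kernel-side sketch only; would live alongside the driver code above.
     * mqnic_create_sched_block name/signature assumed from the body shown. */
    static int example_sched_block_cycle(struct mqnic_if *interface,
            struct reg_block *sched_block_rb)
    {
        struct mqnic_sched_block *block;
        int ret;

        /* enumerate the block's register list and create its schedulers */
        ret = mqnic_create_sched_block(interface, &block, 0, sched_block_rb);
        if (ret)
            return ret;

        /* creation leaves the schedulers disabled; enable them on demand */
        mqnic_activate_sched_block(block);

        /* ... TX queues are now served by this block's schedulers ... */

        mqnic_deactivate_sched_block(block);
        mqnic_destroy_sched_block(&block);
        return 0;
    }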
@ -35,10 +35,10 @@

#include "mqnic.h"

int mqnic_create_scheduler(struct mqnic_port *port, struct mqnic_sched **sched_ptr,
int mqnic_create_scheduler(struct mqnic_sched_block *block, struct mqnic_sched **sched_ptr,
        int index, struct reg_block *rb)
{
    struct device *dev = port->dev;
    struct device *dev = block->dev;
    struct mqnic_sched *sched;

    sched = kzalloc(sizeof(*sched), GFP_KERNEL);
@ -48,8 +48,8 @@ int mqnic_create_scheduler(struct mqnic_port *port, struct mqnic_sched **sched_p
    *sched_ptr = sched;

    sched->dev = dev;
    sched->interface = port->interface;
    sched->port = port;
    sched->interface = block->interface;
    sched->sched_block = block;

    sched->index = index;

@ -60,7 +60,7 @@ int mqnic_create_scheduler(struct mqnic_port *port, struct mqnic_sched **sched_p
    sched->channel_count = ioread32(rb->regs + MQNIC_RB_SCHED_RR_REG_CH_COUNT);
    sched->channel_stride = ioread32(rb->regs + MQNIC_RB_SCHED_RR_REG_CH_STRIDE);

    sched->hw_addr = port->interface->hw_addr + sched->offset;
    sched->hw_addr = block->interface->hw_addr + sched->offset;

    dev_info(dev, "Scheduler type: 0x%08x", sched->type);
    dev_info(dev, "Scheduler offset: 0x%08x", sched->offset);
@ -77,6 +77,7 @@ int main(int argc, char *argv[])
    struct mqnic *dev;
    int interface = 0;
    int port = 0;
    int sched_block = 0;

    struct reg_block *rb;

@ -187,6 +188,8 @@ int main(int argc, char *argv[])
    }

    printf("IF features: 0x%08x\n", dev_interface->if_features);
    printf("Port count: %d\n", dev_interface->port_count);
    printf("Scheduler block count: %d\n", dev_interface->sched_block_count);
    printf("Max TX MTU: %d\n", dev_interface->max_tx_mtu);
    printf("Max RX MTU: %d\n", dev_interface->max_rx_mtu);
    printf("TX MTU: %d\n", mqnic_reg_read32(dev_interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_TX_MTU));
@ -212,25 +215,31 @@ int main(int argc, char *argv[])
    printf("RX completion queue count: %d\n", dev_interface->rx_cpl_queue_count);
    printf("RX completion queue stride: 0x%08x\n", dev_interface->rx_cpl_queue_stride);

    printf("Port count: %d\n", dev_interface->port_count);

    if (port < 0 || port >= dev_interface->port_count)
    {
        fprintf(stderr, "Port out of range\n");
        goto err;
    }

    struct mqnic_port *dev_port = dev_interface->ports[port];
    sched_block = port;

    if (!dev_port)
    if (sched_block < 0 || sched_block >= dev_interface->sched_block_count)
    {
        fprintf(stderr, "Invalid port\n");
        fprintf(stderr, "Scheduler block out of range\n");
        goto err;
    }

    struct mqnic_sched_block *dev_sched_block = dev_interface->sched_blocks[sched_block];

    if (!dev_sched_block)
    {
        fprintf(stderr, "Invalid scheduler block\n");
        goto err;
    }

    printf("Sched count: %d\n", dev_port->sched_count);
    printf("Sched count: %d\n", dev_sched_block->sched_count);

    rb = find_reg_block(dev_port->rb_list, MQNIC_RB_TDMA_SCH_TYPE, MQNIC_RB_TDMA_SCH_VER, 0);
    rb = find_reg_block(dev_sched_block->rb_list, MQNIC_RB_TDMA_SCH_TYPE, MQNIC_RB_TDMA_SCH_VER, 0);

    if (dev->phc_rb && rb)
    {

@ -57,6 +57,7 @@ int main(int argc, char *argv[])
    struct mqnic *dev;
    int interface = 0;
    int port = 0;
    int sched_block = 0;

    name = strrchr(argv[0], '/');
    name = name ? 1+name : argv[0];
@ -191,6 +192,8 @@ int main(int argc, char *argv[])
        (rb->version >> 16) & 0xff, (rb->version >> 8) & 0xff, rb->version & 0xff);

    printf("IF features: 0x%08x\n", dev_interface->if_features);
    printf("Port count: %d\n", dev_interface->port_count);
    printf("Scheduler block count: %d\n", dev_interface->sched_block_count);
    printf("Max TX MTU: %d\n", dev_interface->max_tx_mtu);
    printf("Max RX MTU: %d\n", dev_interface->max_rx_mtu);
    printf("TX MTU: %d\n", mqnic_reg_read32(dev_interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_TX_MTU));
@ -217,8 +220,6 @@ int main(int argc, char *argv[])
    printf("RX completion queue count: %d\n", dev_interface->rx_cpl_queue_count);
    printf("RX completion queue stride: 0x%08x\n", dev_interface->rx_cpl_queue_stride);

    printf("Port count: %d\n", dev_interface->port_count);

    if (port < 0 || port >= dev_interface->port_count)
    {
        fprintf(stderr, "Port out of range\n");
@ -226,23 +227,32 @@ int main(int argc, char *argv[])
        goto err;
    }

    struct mqnic_port *dev_port = dev_interface->ports[port];
    sched_block = port;

    if (!dev_port)
    if (sched_block < 0 || sched_block >= dev_interface->sched_block_count)
    {
        fprintf(stderr, "Invalid port\n");
        fprintf(stderr, "Scheduler block out of range\n");
        ret = -1;
        goto err;
    }

    printf("Port-level register blocks:\n");
    for (struct reg_block *rb = dev_port->rb_list; rb->type && rb->version; rb++)
    struct mqnic_sched_block *dev_sched_block = dev_interface->sched_blocks[sched_block];

    if (!dev_sched_block)
    {
        fprintf(stderr, "Invalid scheduler block\n");
        ret = -1;
        goto err;
    }

    printf("Scheduler block-level register blocks:\n");
    for (struct reg_block *rb = dev_sched_block->rb_list; rb->type && rb->version; rb++)
        printf(" type 0x%08x (v %d.%d.%d.%d)\n", rb->type, rb->version >> 24,
            (rb->version >> 16) & 0xff, (rb->version >> 8) & 0xff, rb->version & 0xff);

    printf("Sched count: %d\n", dev_port->sched_count);
    printf("Sched count: %d\n", dev_sched_block->sched_count);

    for (struct reg_block *rb = dev_port->rb_list; rb->type && rb->version; rb++)
    for (struct reg_block *rb = dev_sched_block->rb_list; rb->type && rb->version; rb++)
    {
        if (rb->type == MQNIC_RB_SCHED_RR_TYPE && rb->version == MQNIC_RB_SCHED_RR_VER)
        {
@ -378,12 +388,12 @@ int main(int argc, char *argv[])
        printf("EQ %4d 0x%016lx %d %2d %d %d %4d %6d %6d %6d\n", k, base_addr, active, log_queue_size, armed, continuous, interrupt_index, head_ptr, tail_ptr, occupancy);
    }

    for (int k = 0; k < dev_port->sched_count; k++)
    for (int k = 0; k < dev_sched_block->sched_count; k++)
    {
        printf("Port %d scheduler %d\n", port, k);
        printf("Scheduler block %d scheduler %d\n", sched_block, k);
        for (int l = 0; l < dev_interface->tx_queue_count; l++)
        {
            printf("Sched %2d queue %4d state: 0x%08x\n", k, l, mqnic_reg_read32(dev_port->sched[k]->regs, l*4));
            printf("Sched %2d queue %4d state: 0x%08x\n", k, l, mqnic_reg_read32(dev_sched_block->sched[k]->regs, l*4));
        }
    }
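To make the dump-tool changes concrete, the per-block walk above can be folded into a small helper. This is illustrative only, not part of the commit: the helper name is invented, it would live in a tool that includes "mqnic.h", and the assumption that each queue's state is a 32-bit word at offset l*4 in the scheduler's register window is taken directly from the dump loop above.

    /* Hypothetical helper mirroring the dump loop above. */
    static void dump_sched_blocks(struct mqnic_if *dev_interface)
    {
        for (int k = 0; k < dev_interface->sched_block_count; k++)
        {
            struct mqnic_sched_block *blk = dev_interface->sched_blocks[k];

            if (!blk)
                continue;

            printf("Scheduler block %d: %d scheduler(s)\n", k, blk->sched_count);

            for (int s = 0; s < blk->sched_count; s++)
                for (int q = 0; q < dev_interface->tx_queue_count; q++)
                    printf("  sched %d queue %4d state: 0x%08x\n", s, q,
                            mqnic_reg_read32(blk->sched[s]->regs, q * 4));
        }
    }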
@ -519,6 +519,8 @@ struct mqnic_if *mqnic_if_open(struct mqnic *dev, int index, volatile uint8_t *r
    }

    interface->if_features = mqnic_reg_read32(interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_FEATURES);
    interface->port_count = mqnic_reg_read32(interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_PORT_COUNT);
    interface->sched_block_count = mqnic_reg_read32(interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_SCHED_COUNT);
    interface->max_tx_mtu = mqnic_reg_read32(interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_MAX_TX_MTU);
    interface->max_rx_mtu = mqnic_reg_read32(interface->if_ctrl_rb->regs, MQNIC_RB_IF_CTRL_REG_MAX_RX_MTU);

@ -597,21 +599,20 @@ struct mqnic_if *mqnic_if_open(struct mqnic *dev, int index, volatile uint8_t *r
    if (interface->rx_cpl_queue_count > MQNIC_MAX_RX_CPL_RINGS)
        interface->rx_cpl_queue_count = MQNIC_MAX_RX_CPL_RINGS;

    interface->port_count = 0;
    while (interface->port_count < MQNIC_MAX_PORTS)
    for (int k = 0; k < interface->sched_block_count; k++)
    {
        struct reg_block *sched_block_rb = find_reg_block(interface->rb_list, MQNIC_RB_SCHED_BLOCK_TYPE, MQNIC_RB_SCHED_BLOCK_VER, interface->port_count);
        struct mqnic_port *port;
        struct reg_block *sched_block_rb = find_reg_block(interface->rb_list, MQNIC_RB_SCHED_BLOCK_TYPE, MQNIC_RB_SCHED_BLOCK_VER, k);
        struct mqnic_sched_block *sched_block;

        if (!sched_block_rb)
            break;

        port = mqnic_port_open(interface, interface->port_count, sched_block_rb);

        if (!port)
            goto fail;

        interface->ports[interface->port_count++] = port;
        sched_block = mqnic_sched_block_open(interface, k, sched_block_rb);

        if (!sched_block)
            goto fail;

        interface->sched_blocks[k] = sched_block;
    }

    return interface;
@ -626,13 +627,13 @@ void mqnic_if_close(struct mqnic_if *interface)
    if (!interface)
        return;

    for (int k = 0; k < interface->port_count; k++)
    for (int k = 0; k < interface->sched_block_count; k++)
    {
        if (!interface->ports[k])
        if (!interface->sched_blocks[k])
            continue;

        mqnic_port_close(interface->ports[k]);
        interface->ports[k] = NULL;
        mqnic_sched_block_close(interface->sched_blocks[k]);
        interface->sched_blocks[k] = NULL;
    }

    if (interface->rb_list)
@ -641,86 +642,86 @@ void mqnic_if_close(struct mqnic_if *interface)
    free(interface);
}

struct mqnic_port *mqnic_port_open(struct mqnic_if *interface, int index, struct reg_block *block_rb)
struct mqnic_sched_block *mqnic_sched_block_open(struct mqnic_if *interface, int index, struct reg_block *block_rb)
{
    struct mqnic_port *port = calloc(1, sizeof(struct mqnic_port));
    struct mqnic_sched_block *block = calloc(1, sizeof(struct mqnic_sched_block));

    if (!port)
    if (!block)
        return NULL;

    int offset = mqnic_reg_read32(block_rb->regs, MQNIC_RB_SCHED_BLOCK_REG_OFFSET);

    port->mqnic = interface->mqnic;
    port->interface = interface;
    block->mqnic = interface->mqnic;
    block->interface = interface;

    port->index = index;
    block->index = index;

    port->rb_list = enumerate_reg_block_list(interface->regs, offset, interface->regs_size);
    block->rb_list = enumerate_reg_block_list(interface->regs, offset, interface->regs_size);

    if (!port->rb_list)
    if (!block->rb_list)
    {
fprintf(stderr, "Error: filed to enumerate blocks\n");
        goto fail;
    }

    port->sched_count = 0;
    for (struct reg_block *rb = port->rb_list; rb->type && rb->version; rb++)
    block->sched_count = 0;
    for (struct reg_block *rb = block->rb_list; rb->type && rb->version; rb++)
    {
        if (rb->type == MQNIC_RB_SCHED_RR_TYPE && rb->version == MQNIC_RB_SCHED_RR_VER)
        {
            struct mqnic_sched *sched = mqnic_sched_open(port, port->sched_count, rb);
            struct mqnic_sched *sched = mqnic_sched_open(block, block->sched_count, rb);

            if (!sched)
                goto fail;

            port->sched[port->sched_count++] = sched;
            block->sched[block->sched_count++] = sched;
        }
    }

    return port;
    return block;

fail:
    mqnic_port_close(port);
    mqnic_sched_block_close(block);
    return NULL;
}

void mqnic_port_close(struct mqnic_port *port)
void mqnic_sched_block_close(struct mqnic_sched_block *block)
{
    if (!port)
    if (!block)
        return;

    for (int k = 0; k < port->sched_count; k++)
    for (int k = 0; k < block->sched_count; k++)
    {
        if (!port->sched[k])
        if (!block->sched[k])
            continue;

        mqnic_sched_close(port->sched[k]);
        port->sched[k] = NULL;
        mqnic_sched_close(block->sched[k]);
        block->sched[k] = NULL;
    }

    if (port->rb_list)
        free_reg_block_list(port->rb_list);
    if (block->rb_list)
        free_reg_block_list(block->rb_list);

    free(port);
    free(block);
}

struct mqnic_sched *mqnic_sched_open(struct mqnic_port *port, int index, struct reg_block *rb)
struct mqnic_sched *mqnic_sched_open(struct mqnic_sched_block *block, int index, struct reg_block *rb)
{
    struct mqnic_sched *sched = calloc(1, sizeof(struct mqnic_sched));

    if (!sched)
        return NULL;

    sched->mqnic = port->mqnic;
    sched->interface = port->interface;
    sched->port = port;
    sched->mqnic = block->mqnic;
    sched->interface = block->interface;
    sched->sched_block = block;

    sched->index = index;

    sched->rb = rb;
    sched->regs = rb->base + mqnic_reg_read32(rb->regs, MQNIC_RB_SCHED_RR_REG_OFFSET);

    if (sched->regs >= port->interface->regs+port->interface->regs_size)
    if (sched->regs >= block->interface->regs+block->interface->regs_size)
    {
        fprintf(stderr, "Error: computed pointer out of range\n");
        goto fail;
@ -50,7 +50,7 @@ struct mqnic;
struct mqnic_sched {
    struct mqnic *mqnic;
    struct mqnic_if *interface;
    struct mqnic_port *port;
    struct mqnic_sched_block *sched_block;

    int index;

@ -65,7 +65,7 @@ struct mqnic_sched {
    volatile uint8_t *regs;
};

struct mqnic_port {
struct mqnic_sched_block {
    struct mqnic *mqnic;
    struct mqnic_if *interface;

@ -120,7 +120,8 @@ struct mqnic_if {
    uint32_t rx_cpl_queue_stride;

    uint32_t port_count;
    struct mqnic_port *ports[MQNIC_MAX_PORTS];
    uint32_t sched_block_count;
    struct mqnic_sched_block *sched_blocks[MQNIC_MAX_PORTS];
};

struct mqnic {
@ -170,10 +171,10 @@ void mqnic_close(struct mqnic *dev);
struct mqnic_if *mqnic_if_open(struct mqnic *dev, int index, volatile uint8_t *regs);
void mqnic_if_close(struct mqnic_if *interface);

struct mqnic_port *mqnic_port_open(struct mqnic_if *interface, int index, struct reg_block *block_rb);
void mqnic_port_close(struct mqnic_port *port);
struct mqnic_sched_block *mqnic_sched_block_open(struct mqnic_if *interface, int index, struct reg_block *block_rb);
void mqnic_sched_block_close(struct mqnic_sched_block *block);

struct mqnic_sched *mqnic_sched_open(struct mqnic_port *port, int index, struct reg_block *rb);
struct mqnic_sched *mqnic_sched_open(struct mqnic_sched_block *block, int index, struct reg_block *rb);
void mqnic_sched_close(struct mqnic_sched *sched);

#endif /* MQNIC_H */
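The userspace library now caches both counts read from the interface control register block; a quick consistency check against the raw registers could look like the sketch below. Illustrative only, not part of the commit: the function name is invented and would live in a tool that includes "mqnic.h", while the fields and register names used are exactly those appearing in the diff above.

    /* Hypothetical check: re-read the IF control block counts and compare
     * with the values cached by mqnic_if_open(). */
    static void check_if_counts(struct mqnic_if *dev_interface)
    {
        uint32_t ports = mqnic_reg_read32(dev_interface->if_ctrl_rb->regs,
                MQNIC_RB_IF_CTRL_REG_PORT_COUNT);
        uint32_t blocks = mqnic_reg_read32(dev_interface->if_ctrl_rb->regs,
                MQNIC_RB_IF_CTRL_REG_SCHED_COUNT);

        if (ports != dev_interface->port_count || blocks != dev_interface->sched_block_count)
            fprintf(stderr, "Warning: cached port/scheduler block counts do not match hardware\n");
    }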