From 11f31c896c2d1a59a0b640234dfe43b3c561849c Mon Sep 17 00:00:00 2001 From: Alex Forencich Date: Sun, 12 Dec 2021 17:28:43 -0800 Subject: [PATCH] Split interface from net_device --- modules/mqnic/Makefile | 1 + modules/mqnic/mqnic.h | 115 ++++++++++++------ modules/mqnic/mqnic_cq.c | 7 +- modules/mqnic/mqnic_eq.c | 26 ++-- modules/mqnic/mqnic_if.c | 230 +++++++++++++++++++++++++++++++++++ modules/mqnic/mqnic_main.c | 26 ++-- modules/mqnic/mqnic_netdev.c | 152 ++++++----------------- modules/mqnic/mqnic_port.c | 9 +- modules/mqnic/mqnic_rx.c | 54 ++++---- modules/mqnic/mqnic_tx.c | 30 +++-- 10 files changed, 415 insertions(+), 235 deletions(-) create mode 100644 modules/mqnic/mqnic_if.c diff --git a/modules/mqnic/Makefile b/modules/mqnic/Makefile index a0cc63de1..5c9c542f2 100644 --- a/modules/mqnic/Makefile +++ b/modules/mqnic/Makefile @@ -4,6 +4,7 @@ obj-m += mqnic.o mqnic-y += mqnic_main.o mqnic-y += mqnic_irq.o mqnic-y += mqnic_dev.o +mqnic-y += mqnic_if.o mqnic-y += mqnic_netdev.o mqnic-y += mqnic_port.o mqnic-y += mqnic_ptp.o diff --git a/modules/mqnic/mqnic.h b/modules/mqnic/mqnic.h index 60056cba0..27e104e8b 100644 --- a/modules/mqnic/mqnic.h +++ b/modules/mqnic/mqnic.h @@ -53,6 +53,7 @@ #include "mqnic_hw.h" struct mqnic_dev; +struct mqnic_if; struct mqnic_board_ops { int (*init)(struct mqnic_dev *mqnic); @@ -128,7 +129,7 @@ struct mqnic_dev { u32 if_stride; u32 if_csr_offset; - struct net_device *ndev[MQNIC_MAX_IF]; + struct mqnic_if *interface[MQNIC_MAX_IF]; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; @@ -203,7 +204,7 @@ struct mqnic_ring { }; struct device *dev; - struct net_device *ndev; + struct mqnic_if *interface; struct mqnic_priv *priv; int index; struct mqnic_cq_ring *cq_ring; @@ -229,8 +230,7 @@ struct mqnic_cq_ring { dma_addr_t buf_dma_addr; struct device *dev; - struct net_device *ndev; - struct mqnic_priv *priv; + struct mqnic_if *interface; struct napi_struct napi; int index; struct mqnic_eq_ring *eq_ring; @@ -260,8 +260,7 @@ struct mqnic_eq_ring { dma_addr_t buf_dma_addr; struct device *dev; - struct net_device *ndev; - struct mqnic_priv *priv; + struct mqnic_if *interface; int index; struct mqnic_irq *irq; int irq_index; @@ -279,8 +278,7 @@ struct mqnic_eq_ring { struct mqnic_port { struct device *dev; - struct net_device *ndev; - struct mqnic_priv *priv; + struct mqnic_if *interface; int index; @@ -289,6 +287,7 @@ struct mqnic_port { u32 port_id; u32 port_features; u32 port_mtu; + u32 sched_count; u32 sched_offset; u32 sched_stride; @@ -297,10 +296,56 @@ struct mqnic_port { u8 __iomem *hw_addr; }; +struct mqnic_if { + struct device *dev; + struct mqnic_dev *mdev; + + int index; + + u32 if_id; + u32 if_features; + + u32 event_queue_count; + u32 event_queue_offset; + struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS]; + + u32 tx_queue_count; + u32 tx_queue_offset; + struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS]; + + u32 tx_cpl_queue_count; + u32 tx_cpl_queue_offset; + struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS]; + + u32 rx_queue_count; + u32 rx_queue_offset; + struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS]; + + u32 rx_cpl_queue_count; + u32 rx_cpl_queue_offset; + struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS]; + + u32 port_count; + u32 port_offset; + u32 port_stride; + struct mqnic_port *port[MQNIC_MAX_PORTS]; + + u32 max_desc_block_size; + + u8 __iomem *hw_addr; + u8 __iomem *csr_hw_addr; + + u32 ndev_count; + struct net_device *ndev[MQNIC_MAX_PORTS]; + + struct i2c_client *mod_i2c_client; +}; + struct 
mqnic_priv { struct device *dev; struct net_device *ndev; struct mqnic_dev *mdev; + struct mqnic_if *interface; spinlock_t stats_lock; @@ -308,34 +353,28 @@ struct mqnic_priv { bool registered; bool port_up; - u32 if_id; u32 if_features; + u32 event_queue_count; - u32 event_queue_offset; + struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS]; + u32 tx_queue_count; - u32 tx_queue_offset; + struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS]; + u32 tx_cpl_queue_count; - u32 tx_cpl_queue_offset; + struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS]; + u32 rx_queue_count; - u32 rx_queue_offset; + struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS]; + u32 rx_cpl_queue_count; - u32 rx_cpl_queue_offset; + struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS]; + u32 port_count; - u32 port_offset; - u32 port_stride; + struct mqnic_port *port[MQNIC_MAX_PORTS]; u32 max_desc_block_size; - u8 __iomem *hw_addr; - u8 __iomem *csr_hw_addr; - - struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS]; - struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS]; - struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS]; - struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS]; - struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS]; - struct mqnic_port *port[MQNIC_MAX_PORTS]; - struct hwtstamp_config hwts_config; struct i2c_client *mod_i2c_client; @@ -350,14 +389,18 @@ void mqnic_irq_deinit_pcie(struct mqnic_dev *mdev); // mqnic_dev.c extern const struct file_operations mqnic_fops; +// mqnic_if.c +int mqnic_create_interface(struct mqnic_dev *mdev, struct mqnic_if **interface_ptr, + int index, u8 __iomem *hw_addr); +void mqnic_destroy_interface(struct mqnic_if **interface_ptr); + // mqnic_netdev.c void mqnic_update_stats(struct net_device *ndev); -int mqnic_create_netdev(struct mqnic_dev *mdev, struct net_device **ndev_ptr, - int index, u8 __iomem *hw_addr); +int mqnic_create_netdev(struct mqnic_if *interface, struct net_device **ndev_ptr, int index); void mqnic_destroy_netdev(struct net_device **ndev_ptr); // mqnic_port.c -int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, +int mqnic_create_port(struct mqnic_if *interface, struct mqnic_port **port_ptr, int index, u8 __iomem *hw_addr); void mqnic_destroy_port(struct mqnic_port **port_ptr); int mqnic_activate_port(struct mqnic_port *port); @@ -388,7 +431,7 @@ int mqnic_board_init(struct mqnic_dev *mqnic); void mqnic_board_deinit(struct mqnic_dev *mqnic); // mqnic_eq.c -int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, +int mqnic_create_eq_ring(struct mqnic_if *interface, struct mqnic_eq_ring **ring_ptr, int index, u8 __iomem *hw_addr); void mqnic_destroy_eq_ring(struct mqnic_eq_ring **ring_ptr); int mqnic_alloc_eq_ring(struct mqnic_eq_ring *ring, int size, int stride); @@ -403,7 +446,7 @@ void mqnic_arm_eq(struct mqnic_eq_ring *ring); void mqnic_process_eq(struct mqnic_eq_ring *eq_ring); // mqnic_cq.c -int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, +int mqnic_create_cq_ring(struct mqnic_if *interface, struct mqnic_cq_ring **ring_ptr, int index, u8 __iomem *hw_addr); void mqnic_destroy_cq_ring(struct mqnic_cq_ring **ring_ptr); int mqnic_alloc_cq_ring(struct mqnic_cq_ring *ring, int size, int stride); @@ -417,12 +460,13 @@ void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring); void mqnic_arm_cq(struct mqnic_cq_ring *ring); // mqnic_tx.c -int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, +int mqnic_create_tx_ring(struct mqnic_if *interface, struct 
mqnic_ring **ring_ptr, int index, u8 __iomem *hw_addr); void mqnic_destroy_tx_ring(struct mqnic_ring **ring_ptr); int mqnic_alloc_tx_ring(struct mqnic_ring *ring, int size, int stride); void mqnic_free_tx_ring(struct mqnic_ring *ring); -int mqnic_activate_tx_ring(struct mqnic_ring *ring, struct mqnic_cq_ring *cq_ring); +int mqnic_activate_tx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv, + struct mqnic_cq_ring *cq_ring); void mqnic_deactivate_tx_ring(struct mqnic_ring *ring); bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring); bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring); @@ -436,12 +480,13 @@ int mqnic_poll_tx_cq(struct napi_struct *napi, int budget); netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev); // mqnic_rx.c -int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, +int mqnic_create_rx_ring(struct mqnic_if *interface, struct mqnic_ring **ring_ptr, int index, u8 __iomem *hw_addr); void mqnic_destroy_rx_ring(struct mqnic_ring **ring_ptr); int mqnic_alloc_rx_ring(struct mqnic_ring *ring, int size, int stride); void mqnic_free_rx_ring(struct mqnic_ring *ring); -int mqnic_activate_rx_ring(struct mqnic_ring *ring, struct mqnic_cq_ring *cq_ring); +int mqnic_activate_rx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv, + struct mqnic_cq_ring *cq_ring); void mqnic_deactivate_rx_ring(struct mqnic_ring *ring); bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring); bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring); diff --git a/modules/mqnic/mqnic_cq.c b/modules/mqnic/mqnic_cq.c index 0fd2634af..962b4118b 100644 --- a/modules/mqnic/mqnic_cq.c +++ b/modules/mqnic/mqnic_cq.c @@ -35,7 +35,7 @@ #include "mqnic.h" -int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, +int mqnic_create_cq_ring(struct mqnic_if *interface, struct mqnic_cq_ring **ring_ptr, int index, u8 __iomem *hw_addr) { struct mqnic_cq_ring *ring; @@ -44,9 +44,8 @@ int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_pt if (!ring) return -ENOMEM; - ring->dev = priv->dev; - ring->ndev = priv->ndev; - ring->priv = priv; + ring->dev = interface->dev; + ring->interface = interface; ring->index = index; ring->active = 0; diff --git a/modules/mqnic/mqnic_eq.c b/modules/mqnic/mqnic_eq.c index efb1ed5a8..fc67ecf78 100644 --- a/modules/mqnic/mqnic_eq.c +++ b/modules/mqnic/mqnic_eq.c @@ -45,7 +45,7 @@ static int mqnic_eq_int(struct notifier_block *nb, unsigned long action, void *d return NOTIFY_DONE; } -int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, +int mqnic_create_eq_ring(struct mqnic_if *interface, struct mqnic_eq_ring **ring_ptr, int index, u8 __iomem *hw_addr) { struct mqnic_eq_ring *ring; @@ -54,9 +54,8 @@ int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_pt if (!ring) return -ENOMEM; - ring->dev = priv->dev; - ring->ndev = priv->ndev; - ring->priv = priv; + ring->dev = interface->dev; + ring->interface = interface; ring->index = index; ring->active = 0; @@ -217,16 +216,13 @@ void mqnic_arm_eq(struct mqnic_eq_ring *ring) void mqnic_process_eq(struct mqnic_eq_ring *eq_ring) { - struct mqnic_priv *priv = eq_ring->priv; + struct mqnic_if *interface = eq_ring->interface; struct mqnic_event *event; struct mqnic_cq_ring *cq_ring; u32 eq_index; u32 eq_tail_ptr; int done = 0; - if (unlikely(!priv->port_up)) - return; - // read head pointer from NIC mqnic_eq_read_head_ptr(eq_ring); @@ -238,33 +234,33 @@ void mqnic_process_eq(struct 
mqnic_eq_ring *eq_ring) if (event->type == MQNIC_EVENT_TYPE_TX_CPL) { // transmit completion event - if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) { + if (unlikely(le16_to_cpu(event->source) > interface->tx_cpl_queue_count)) { dev_err(eq_ring->dev, "%s on port %d: unknown event source %d (index %d, type %d)", - __func__, priv->index, le16_to_cpu(event->source), eq_index, + __func__, interface->index, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type)); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true); } else { - cq_ring = priv->tx_cpl_ring[le16_to_cpu(event->source)]; + cq_ring = interface->tx_cpl_ring[le16_to_cpu(event->source)]; if (likely(cq_ring && cq_ring->handler)) cq_ring->handler(cq_ring); } } else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) { // receive completion event - if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) { + if (unlikely(le16_to_cpu(event->source) > interface->rx_cpl_queue_count)) { dev_err(eq_ring->dev, "%s on port %d: unknown event source %d (index %d, type %d)", - __func__, priv->index, le16_to_cpu(event->source), eq_index, + __func__, interface->index, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type)); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true); } else { - cq_ring = priv->rx_cpl_ring[le16_to_cpu(event->source)]; + cq_ring = interface->rx_cpl_ring[le16_to_cpu(event->source)]; if (likely(cq_ring && cq_ring->handler)) cq_ring->handler(cq_ring); } } else { dev_err(eq_ring->dev, "%s on port %d: unknown event type %d (index %d, source %d)", - __func__, priv->index, le16_to_cpu(event->type), eq_index, + __func__, interface->index, le16_to_cpu(event->type), eq_index, le16_to_cpu(event->source)); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true); diff --git a/modules/mqnic/mqnic_if.c b/modules/mqnic/mqnic_if.c new file mode 100644 index 000000000..d1e66e951 --- /dev/null +++ b/modules/mqnic/mqnic_if.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: BSD-2-Clause-Views +/* + * Copyright 2021, The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * The views and conclusions contained in the software and documentation + * are those of the authors and should not be interpreted as representing + * official policies, either expressed or implied, of The Regents of the + * University of California. + */ + +#include "mqnic.h" + +int mqnic_create_interface(struct mqnic_dev *mdev, struct mqnic_if **interface_ptr, + int index, u8 __iomem *hw_addr) +{ + struct device *dev = mdev->dev; + struct mqnic_if *interface; + struct mqnic_priv *priv; + int ret = 0; + int k; + u32 desc_block_size; + + interface = kzalloc(sizeof(*interface), GFP_KERNEL); + if (!interface) + return -ENOMEM; + + interface->mdev = mdev; + interface->dev = dev; + + interface->index = index; + + interface->hw_addr = hw_addr; + interface->csr_hw_addr = hw_addr + mdev->if_csr_offset; + + // read ID registers + interface->if_id = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_IF_ID); + dev_info(dev, "IF ID: 0x%08x", interface->if_id); + interface->if_features = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_IF_FEATURES); + dev_info(dev, "IF features: 0x%08x", interface->if_features); + + interface->event_queue_count = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_EVENT_QUEUE_COUNT); + dev_info(dev, "Event queue count: %d", interface->event_queue_count); + interface->event_queue_offset = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_EVENT_QUEUE_OFFSET); + dev_info(dev, "Event queue offset: 0x%08x", interface->event_queue_offset); + + interface->event_queue_count = min_t(u32, interface->event_queue_count, MQNIC_MAX_EVENT_RINGS); + + interface->tx_queue_count = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_TX_QUEUE_COUNT); + dev_info(dev, "TX queue count: %d", interface->tx_queue_count); + interface->tx_queue_offset = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_TX_QUEUE_OFFSET); + dev_info(dev, "TX queue offset: 0x%08x", interface->tx_queue_offset); + + interface->tx_queue_count = min_t(u32, interface->tx_queue_count, MQNIC_MAX_TX_RINGS); + + interface->tx_cpl_queue_count = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_TX_CPL_QUEUE_COUNT); + dev_info(dev, "TX completion queue count: %d", interface->tx_cpl_queue_count); + interface->tx_cpl_queue_offset = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_TX_CPL_QUEUE_OFFSET); + dev_info(dev, "TX completion queue offset: 0x%08x", interface->tx_cpl_queue_offset); + + interface->tx_cpl_queue_count = min_t(u32, interface->tx_cpl_queue_count, MQNIC_MAX_TX_CPL_RINGS); + + interface->rx_queue_count = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_RX_QUEUE_COUNT); + dev_info(dev, "RX queue count: %d", interface->rx_queue_count); + interface->rx_queue_offset = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_RX_QUEUE_OFFSET); + dev_info(dev, "RX queue offset: 0x%08x", interface->rx_queue_offset); + + interface->rx_queue_count = min_t(u32, interface->rx_queue_count, MQNIC_MAX_RX_RINGS); + + interface->rx_cpl_queue_count = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_RX_CPL_QUEUE_COUNT); + dev_info(dev, "RX completion queue count: %d", interface->rx_cpl_queue_count); + interface->rx_cpl_queue_offset = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_RX_CPL_QUEUE_OFFSET); + dev_info(dev, "RX completion queue offset: 0x%08x", interface->rx_cpl_queue_offset); + + interface->rx_cpl_queue_count = min_t(u32, interface->rx_cpl_queue_count, MQNIC_MAX_RX_CPL_RINGS); + + interface->port_count = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_PORT_COUNT); + dev_info(dev, "Port count: %d", interface->port_count); + interface->port_offset = 
ioread32(interface->csr_hw_addr + MQNIC_IF_REG_PORT_OFFSET); + dev_info(dev, "Port offset: 0x%08x", interface->port_offset); + interface->port_stride = ioread32(interface->csr_hw_addr + MQNIC_IF_REG_PORT_STRIDE); + dev_info(dev, "Port stride: 0x%08x", interface->port_stride); + + interface->port_count = min_t(u32, interface->port_count, MQNIC_MAX_PORTS); + + // determine desc block size + iowrite32(0xf << 8, hw_addr + interface->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG); + interface->max_desc_block_size = 1 << ((ioread32(hw_addr + interface->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG) >> 8) & 0xf); + iowrite32(0, hw_addr + interface->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG); + + dev_info(dev, "Max desc block size: %d", interface->max_desc_block_size); + + interface->max_desc_block_size = min_t(u32, interface->max_desc_block_size, MQNIC_MAX_FRAGS); + + desc_block_size = min_t(u32, interface->max_desc_block_size, 4); + + *interface_ptr = interface; + + // create rings + for (k = 0; k < interface->event_queue_count; k++) { + ret = mqnic_create_eq_ring(interface, &interface->event_ring[k], k, + hw_addr + interface->event_queue_offset + k * MQNIC_EVENT_QUEUE_STRIDE); + if (ret) + goto fail; + + ret = mqnic_alloc_eq_ring(interface->event_ring[k], 1024, MQNIC_EVENT_SIZE); // TODO configure/constant + if (ret) + goto fail; + + mqnic_activate_eq_ring(interface->event_ring[k], mdev->irq[k % mdev->irq_count]); + mqnic_arm_eq(interface->event_ring[k]); + } + + for (k = 0; k < interface->tx_queue_count; k++) { + ret = mqnic_create_tx_ring(interface, &interface->tx_ring[k], k, + hw_addr + interface->tx_queue_offset + k * MQNIC_QUEUE_STRIDE); + if (ret) + goto fail; + } + + for (k = 0; k < interface->tx_cpl_queue_count; k++) { + ret = mqnic_create_cq_ring(interface, &interface->tx_cpl_ring[k], k, + hw_addr + interface->tx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); + if (ret) + goto fail; + } + + for (k = 0; k < interface->rx_queue_count; k++) { + ret = mqnic_create_rx_ring(interface, &interface->rx_ring[k], k, + hw_addr + interface->rx_queue_offset + k * MQNIC_QUEUE_STRIDE); + if (ret) + goto fail; + } + + for (k = 0; k < interface->rx_cpl_queue_count; k++) { + ret = mqnic_create_cq_ring(interface, &interface->rx_cpl_ring[k], k, + hw_addr + interface->rx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); + if (ret) + goto fail; + } + + // create ports + for (k = 0; k < interface->port_count; k++) { + ret = mqnic_create_port(interface, &interface->port[k], k, + hw_addr + interface->port_offset + k * interface->port_stride); + if (ret) + goto fail; + } + + // create net_devices + interface->ndev_count = 1; + for (k = 0; k < interface->ndev_count; k++) { + ret = mqnic_create_netdev(interface, &interface->ndev[k], k); + if (ret) + goto fail; + + priv = netdev_priv(interface->ndev[k]); + priv->mod_i2c_client = interface->mod_i2c_client; + } + + return 0; + +fail: + mqnic_destroy_interface(interface_ptr); + return ret; +} + +void mqnic_destroy_interface(struct mqnic_if **interface_ptr) +{ + struct mqnic_if *interface = *interface_ptr; + int k; + + // destroy associated net_devices + for (k = 0; k < ARRAY_SIZE(interface->ndev); k++) + if (interface->ndev[k]) + mqnic_destroy_netdev(&interface->ndev[k]); + + // free rings + for (k = 0; k < ARRAY_SIZE(interface->event_ring); k++) + if (interface->event_ring[k]) + mqnic_destroy_eq_ring(&interface->event_ring[k]); + + for (k = 0; k < ARRAY_SIZE(interface->tx_ring); k++) + if (interface->tx_ring[k]) + 
mqnic_destroy_tx_ring(&interface->tx_ring[k]); + + for (k = 0; k < ARRAY_SIZE(interface->tx_cpl_ring); k++) + if (interface->tx_cpl_ring[k]) + mqnic_destroy_cq_ring(&interface->tx_cpl_ring[k]); + + for (k = 0; k < ARRAY_SIZE(interface->rx_ring); k++) + if (interface->rx_ring[k]) + mqnic_destroy_rx_ring(&interface->rx_ring[k]); + + for (k = 0; k < ARRAY_SIZE(interface->rx_cpl_ring); k++) + if (interface->rx_cpl_ring[k]) + mqnic_destroy_cq_ring(&interface->rx_cpl_ring[k]); + + // free ports + for (k = 0; k < ARRAY_SIZE(interface->port); k++) + if (interface->port[k]) + mqnic_destroy_port(&interface->port[k]); + + *interface_ptr = NULL; + kfree(interface); +} diff --git a/modules/mqnic/mqnic_main.c b/modules/mqnic/mqnic_main.c index 111524e1c..89d8d7181 100644 --- a/modules/mqnic/mqnic_main.c +++ b/modules/mqnic/mqnic_main.c @@ -82,7 +82,6 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent { int ret = 0; struct mqnic_dev *mqnic; - struct mqnic_priv *priv; struct device *dev = &pdev->dev; int k = 0; @@ -278,17 +277,16 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent for (k = 0; k < mqnic->if_count; k++) { dev_info(dev, "Creating interface %d", k); - ret = mqnic_create_netdev(mqnic, &mqnic->ndev[k], k, mqnic->hw_addr + k * mqnic->if_stride); + ret = mqnic_create_interface(mqnic, &mqnic->interface[k], k, mqnic->hw_addr + k * mqnic->if_stride); if (ret) { - dev_err(dev, "Failed to create net_device"); - goto fail_init_netdev; + dev_err(dev, "Failed to create interface: %d", ret); + goto fail_create_if; } } - // pass module I2C clients to net_device instances + // pass module I2C clients to interface instances for (k = 0; k < mqnic->if_count; k++) { - priv = netdev_priv(mqnic->ndev[k]); - priv->mod_i2c_client = mqnic->mod_i2c_client[k]; + mqnic->interface[k]->mod_i2c_client = mqnic->mod_i2c_client[k]; } mqnic->misc_dev.minor = MISC_DYNAMIC_MINOR; @@ -313,10 +311,10 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent // error handling fail_miscdev: -fail_init_netdev: - for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++) - if (mqnic->ndev[k]) - mqnic_destroy_netdev(&mqnic->ndev[k]); +fail_create_if: + for (k = 0; k < ARRAY_SIZE(mqnic->interface); k++) + if (mqnic->interface[k]) + mqnic_destroy_interface(&mqnic->interface[k]); mqnic_unregister_phc(mqnic); pci_clear_master(pdev); fail_board: @@ -352,9 +350,9 @@ static void mqnic_pci_remove(struct pci_dev *pdev) list_del(&mqnic->dev_list_node); spin_unlock(&mqnic_devices_lock); - for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++) - if (mqnic->ndev[k]) - mqnic_destroy_netdev(&mqnic->ndev[k]); + for (k = 0; k < ARRAY_SIZE(mqnic->interface); k++) + if (mqnic->interface[k]) + mqnic_destroy_interface(&mqnic->interface[k]); mqnic_unregister_phc(mqnic); diff --git a/modules/mqnic/mqnic_netdev.c b/modules/mqnic/mqnic_netdev.c index 1d578dfff..908366736 100644 --- a/modules/mqnic/mqnic_netdev.c +++ b/modules/mqnic/mqnic_netdev.c @@ -43,12 +43,6 @@ static int mqnic_start_port(struct net_device *ndev) dev_info(mdev->dev, "%s on port %d", __func__, priv->index); - // set up event queues - for (k = 0; k < priv->event_queue_count; k++) { - mqnic_activate_eq_ring(priv->event_ring[k], priv->mdev->irq[k % mdev->irq_count]); - mqnic_arm_eq(priv->event_ring[k]); - } - // set up RX queues for (k = 0; k < min(priv->rx_queue_count, priv->rx_cpl_queue_count); k++) { // set up CQ @@ -67,7 +61,7 @@ static int mqnic_start_port(struct net_device *ndev) priv->rx_ring[k]->page_order = 0; else 
priv->rx_ring[k]->page_order = ilog2((ndev->mtu + ETH_HLEN + PAGE_SIZE - 1) / PAGE_SIZE - 1) + 1; - mqnic_activate_rx_ring(priv->rx_ring[k], priv->rx_cpl_ring[k]); + mqnic_activate_rx_ring(priv->rx_ring[k], priv, priv->rx_cpl_ring[k]); } // set up TX queues @@ -84,7 +78,7 @@ static int mqnic_start_port(struct net_device *ndev) // set up queue priv->tx_ring[k]->tx_queue = netdev_get_tx_queue(ndev, k); - mqnic_activate_tx_ring(priv->tx_ring[k], priv->tx_cpl_ring[k]); + mqnic_activate_tx_ring(priv->tx_ring[k], priv, priv->tx_cpl_ring[k]); } // configure ports @@ -153,10 +147,6 @@ static int mqnic_stop_port(struct net_device *ndev) netif_napi_del(&priv->rx_cpl_ring[k]->napi); } - // deactivate event queues - for (k = 0; k < priv->event_queue_count; k++) - mqnic_deactivate_eq_ring(priv->event_ring[k]); - msleep(20); // free descriptors in TX queues @@ -357,10 +347,10 @@ static const struct net_device_ops mqnic_netdev_ops = { .ndo_do_ioctl = mqnic_ioctl, }; -int mqnic_create_netdev(struct mqnic_dev *mdev, struct net_device **ndev_ptr, - int index, u8 __iomem *hw_addr) +int mqnic_create_netdev(struct mqnic_if *interface, struct net_device **ndev_ptr, int index) { - struct device *dev = mdev->dev; + struct mqnic_dev *mdev = interface->mdev; + struct device *dev = interface->dev; struct net_device *ndev; struct mqnic_priv *priv; int ret = 0; @@ -383,55 +373,38 @@ int mqnic_create_netdev(struct mqnic_dev *mdev, struct net_device **ndev_ptr, spin_lock_init(&priv->stats_lock); priv->ndev = ndev; - priv->mdev = mdev; + priv->mdev = interface->mdev; + priv->interface = interface; priv->dev = dev; priv->index = index; priv->port_up = false; - priv->hw_addr = hw_addr; - priv->csr_hw_addr = hw_addr + mdev->if_csr_offset; + // associate interface resources + priv->if_features = interface->if_features; - // read ID registers - priv->if_id = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_IF_ID); - dev_info(dev, "IF ID: 0x%08x", priv->if_id); - priv->if_features = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_IF_FEATURES); - dev_info(dev, "IF features: 0x%08x", priv->if_features); + priv->event_queue_count = interface->event_queue_count; + for (k = 0; k < interface->event_queue_count; k++) + priv->event_ring[k] = interface->event_ring[k]; - priv->event_queue_count = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_EVENT_QUEUE_COUNT); - dev_info(dev, "Event queue count: %d", priv->event_queue_count); - priv->event_queue_offset = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_EVENT_QUEUE_OFFSET); - dev_info(dev, "Event queue offset: 0x%08x", priv->event_queue_offset); - priv->tx_queue_count = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_TX_QUEUE_COUNT); - dev_info(dev, "TX queue count: %d", priv->tx_queue_count); - priv->tx_queue_offset = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_TX_QUEUE_OFFSET); - dev_info(dev, "TX queue offset: 0x%08x", priv->tx_queue_offset); - priv->tx_cpl_queue_count = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_TX_CPL_QUEUE_COUNT); - dev_info(dev, "TX completion queue count: %d", priv->tx_cpl_queue_count); - priv->tx_cpl_queue_offset = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_TX_CPL_QUEUE_OFFSET); - dev_info(dev, "TX completion queue offset: 0x%08x", priv->tx_cpl_queue_offset); - priv->rx_queue_count = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_RX_QUEUE_COUNT); - dev_info(dev, "RX queue count: %d", priv->rx_queue_count); - priv->rx_queue_offset = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_RX_QUEUE_OFFSET); - dev_info(dev, "RX queue offset: 0x%08x", priv->rx_queue_offset); - priv->rx_cpl_queue_count = 
ioread32(priv->csr_hw_addr + MQNIC_IF_REG_RX_CPL_QUEUE_COUNT); - dev_info(dev, "RX completion queue count: %d", priv->rx_cpl_queue_count); - priv->rx_cpl_queue_offset = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_RX_CPL_QUEUE_OFFSET); - dev_info(dev, "RX completion queue offset: 0x%08x", priv->rx_cpl_queue_offset); - priv->port_count = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_PORT_COUNT); - dev_info(dev, "Port count: %d", priv->port_count); - priv->port_offset = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_PORT_OFFSET); - dev_info(dev, "Port offset: 0x%08x", priv->port_offset); - priv->port_stride = ioread32(priv->csr_hw_addr + MQNIC_IF_REG_PORT_STRIDE); - dev_info(dev, "Port stride: 0x%08x", priv->port_stride); + priv->tx_queue_count = interface->tx_queue_count; + for (k = 0; k < interface->tx_queue_count; k++) + priv->tx_ring[k] = interface->tx_ring[k]; - priv->event_queue_count = min_t(u32, priv->event_queue_count, MQNIC_MAX_EVENT_RINGS); + priv->tx_cpl_queue_count = interface->tx_cpl_queue_count; + for (k = 0; k < interface->tx_cpl_queue_count; k++) + priv->tx_cpl_ring[k] = interface->tx_cpl_ring[k]; - priv->tx_queue_count = min_t(u32, priv->tx_queue_count, MQNIC_MAX_TX_RINGS); - priv->tx_cpl_queue_count = min_t(u32, priv->tx_cpl_queue_count, MQNIC_MAX_TX_CPL_RINGS); - priv->rx_queue_count = min_t(u32, priv->rx_queue_count, MQNIC_MAX_RX_RINGS); - priv->rx_cpl_queue_count = min_t(u32, priv->rx_cpl_queue_count, MQNIC_MAX_RX_CPL_RINGS); + priv->rx_queue_count = interface->rx_queue_count; + for (k = 0; k < interface->rx_queue_count; k++) + priv->rx_ring[k] = interface->rx_ring[k]; - priv->port_count = min_t(u32, priv->port_count, MQNIC_MAX_PORTS); + priv->rx_cpl_queue_count = interface->rx_cpl_queue_count; + for (k = 0; k < interface->rx_cpl_queue_count; k++) + priv->rx_cpl_ring[k] = interface->rx_cpl_ring[k]; + + priv->port_count = interface->port_count; + for (k = 0; k < interface->port_count; k++) + priv->port[k] = interface->port[k]; netif_set_real_num_tx_queues(ndev, priv->tx_queue_count); netif_set_real_num_rx_queues(ndev, priv->rx_queue_count); @@ -455,82 +428,33 @@ int mqnic_create_netdev(struct mqnic_dev *mdev, struct net_device **ndev_ptr, priv->hwts_config.tx_type = HWTSTAMP_TX_OFF; priv->hwts_config.rx_filter = HWTSTAMP_FILTER_NONE; - // determine desc block size - iowrite32(0xf << 8, hw_addr + priv->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG); - priv->max_desc_block_size = 1 << ((ioread32(hw_addr + priv->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG) >> 8) & 0xf); - iowrite32(0, hw_addr + priv->tx_queue_offset + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG); - - dev_info(dev, "Max desc block size: %d", priv->max_desc_block_size); - - priv->max_desc_block_size = min_t(u32, priv->max_desc_block_size, MQNIC_MAX_FRAGS); - - desc_block_size = min_t(u32, priv->max_desc_block_size, 4); - - // allocate rings - for (k = 0; k < priv->event_queue_count; k++) { - ret = mqnic_create_eq_ring(priv, &priv->event_ring[k], k, - hw_addr + priv->event_queue_offset + k * MQNIC_EVENT_QUEUE_STRIDE); - if (ret) - goto fail; - - ret = mqnic_alloc_eq_ring(priv->event_ring[k], 1024, MQNIC_EVENT_SIZE); // TODO configure/constant - if (ret) - goto fail; - } + desc_block_size = min_t(u32, interface->max_desc_block_size, 4); + // allocate ring buffers for (k = 0; k < priv->tx_queue_count; k++) { - ret = mqnic_create_tx_ring(priv, &priv->tx_ring[k], k, - hw_addr + priv->tx_queue_offset + k * MQNIC_QUEUE_STRIDE); - if (ret) - goto fail; - ret = mqnic_alloc_tx_ring(priv->tx_ring[k], 1024, MQNIC_DESC_SIZE * 
desc_block_size); // TODO configure/constant if (ret) goto fail; } for (k = 0; k < priv->tx_cpl_queue_count; k++) { - ret = mqnic_create_cq_ring(priv, &priv->tx_cpl_ring[k], k, - hw_addr + priv->tx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); - if (ret) - goto fail; - ret = mqnic_alloc_cq_ring(priv->tx_cpl_ring[k], 1024, MQNIC_CPL_SIZE); // TODO configure/constant if (ret) goto fail; } for (k = 0; k < priv->rx_queue_count; k++) { - ret = mqnic_create_rx_ring(priv, &priv->rx_ring[k], k, - hw_addr + priv->rx_queue_offset + k * MQNIC_QUEUE_STRIDE); - if (ret) - goto fail; - ret = mqnic_alloc_rx_ring(priv->rx_ring[k], 1024, MQNIC_DESC_SIZE); // TODO configure/constant if (ret) goto fail; } for (k = 0; k < priv->rx_cpl_queue_count; k++) { - ret = mqnic_create_cq_ring(priv, &priv->rx_cpl_ring[k], k, - hw_addr + priv->rx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); - if (ret) - goto fail; - ret = mqnic_alloc_cq_ring(priv->rx_cpl_ring[k], 1024, MQNIC_CPL_SIZE); // TODO configure/constant if (ret) goto fail; } - for (k = 0; k < priv->port_count; k++) { - ret = mqnic_create_port(priv, &priv->port[k], k, - hw_addr + priv->port_offset + k * priv->port_stride); - if (ret) - goto fail; - - mqnic_port_set_rss_mask(priv->port[k], 0xffffffff); - } - // entry points ndev->netdev_ops = &mqnic_netdev_ops; ndev->ethtool_ops = &mqnic_ethtool_ops; @@ -584,29 +508,21 @@ void mqnic_destroy_netdev(struct net_device **ndev_ptr) *ndev_ptr = NULL; // free rings - for (k = 0; k < ARRAY_SIZE(priv->event_ring); k++) - if (priv->event_ring[k]) - mqnic_destroy_eq_ring(&priv->event_ring[k]); - for (k = 0; k < ARRAY_SIZE(priv->tx_ring); k++) if (priv->tx_ring[k]) - mqnic_destroy_tx_ring(&priv->tx_ring[k]); + mqnic_free_tx_ring(priv->tx_ring[k]); for (k = 0; k < ARRAY_SIZE(priv->tx_cpl_ring); k++) if (priv->tx_cpl_ring[k]) - mqnic_destroy_cq_ring(&priv->tx_cpl_ring[k]); + mqnic_free_cq_ring(priv->tx_cpl_ring[k]); for (k = 0; k < ARRAY_SIZE(priv->rx_ring); k++) if (priv->rx_ring[k]) - mqnic_destroy_rx_ring(&priv->rx_ring[k]); + mqnic_free_rx_ring(priv->rx_ring[k]); for (k = 0; k < ARRAY_SIZE(priv->rx_cpl_ring); k++) if (priv->rx_cpl_ring[k]) - mqnic_destroy_cq_ring(&priv->rx_cpl_ring[k]); - - for (k = 0; k < ARRAY_SIZE(priv->port); k++) - if (priv->port[k]) - mqnic_destroy_port(&priv->port[k]); + mqnic_free_cq_ring(priv->rx_cpl_ring[k]); free_netdev(ndev); } diff --git a/modules/mqnic/mqnic_port.c b/modules/mqnic/mqnic_port.c index a4a78cd5b..1aa11df1e 100644 --- a/modules/mqnic/mqnic_port.c +++ b/modules/mqnic/mqnic_port.c @@ -35,10 +35,10 @@ #include "mqnic.h" -int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, +int mqnic_create_port(struct mqnic_if *interface, struct mqnic_port **port_ptr, int index, u8 __iomem *hw_addr) { - struct device *dev = priv->dev; + struct device *dev = interface->dev; struct mqnic_port *port; port = kzalloc(sizeof(*port), GFP_KERNEL); @@ -48,12 +48,11 @@ int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, *port_ptr = port; port->dev = dev; - port->ndev = priv->ndev; - port->priv = priv; + port->interface = interface; port->index = index; - port->tx_queue_count = priv->tx_queue_count; + port->tx_queue_count = interface->tx_queue_count; port->hw_addr = hw_addr; diff --git a/modules/mqnic/mqnic_rx.c b/modules/mqnic/mqnic_rx.c index ac88849f6..a704e9619 100644 --- a/modules/mqnic/mqnic_rx.c +++ b/modules/mqnic/mqnic_rx.c @@ -35,7 +35,7 @@ #include "mqnic.h" -int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, +int 
mqnic_create_rx_ring(struct mqnic_if *interface, struct mqnic_ring **ring_ptr, int index, u8 __iomem *hw_addr) { struct mqnic_ring *ring; @@ -44,9 +44,8 @@ int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, if (!ring) return -ENOMEM; - ring->dev = priv->dev; - ring->ndev = priv->ndev; - ring->priv = priv; + ring->dev = interface->dev; + ring->interface = interface; ring->index = index; ring->active = 0; @@ -146,13 +145,15 @@ void mqnic_free_rx_ring(struct mqnic_ring *ring) ring->rx_info = NULL; } -int mqnic_activate_rx_ring(struct mqnic_ring *ring, struct mqnic_cq_ring *cq_ring) +int mqnic_activate_rx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv, + struct mqnic_cq_ring *cq_ring) { mqnic_deactivate_rx_ring(ring); - if (!ring->buf || !cq_ring || cq_ring->handler || cq_ring->src_ring) + if (!ring->buf || !priv || !cq_ring || cq_ring->handler || cq_ring->src_ring) return -EINVAL; + ring->priv = priv; ring->cq_ring = cq_ring; cq_ring->src_ring = ring; cq_ring->handler = mqnic_rx_irq; @@ -189,6 +190,7 @@ void mqnic_deactivate_rx_ring(struct mqnic_ring *ring) ring->cq_ring->handler = NULL; } + ring->priv = NULL; ring->cq_ring = NULL; ring->active = 0; @@ -255,15 +257,15 @@ int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index) dma_addr_t dma_addr; if (unlikely(page)) { - dev_err(ring->dev, "%s: skb not yet processed on port %d", - __func__, ring->priv->index); + dev_err(ring->dev, "%s: skb not yet processed on interface %d", + __func__, ring->interface->index); return -1; } page = dev_alloc_pages(page_order); if (unlikely(!page)) { - dev_err(ring->dev, "%s: failed to allocate memory on port %d", - __func__, ring->priv->index); + dev_err(ring->dev, "%s: failed to allocate memory on interface %d", + __func__, ring->interface->index); return -1; } @@ -271,8 +273,8 @@ int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index) dma_addr = dma_map_page(ring->dev, page, 0, len, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(ring->dev, dma_addr))) { - dev_err(ring->dev, "%s: DMA mapping failed on port %d", - __func__, ring->priv->index); + dev_err(ring->dev, "%s: DMA mapping failed on interface %d", + __func__, ring->interface->index); __free_pages(page, page_order); return -1; } @@ -311,9 +313,10 @@ void mqnic_refill_rx_buffers(struct mqnic_ring *ring) int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) { - struct mqnic_priv *priv = cq_ring->priv; - struct net_device *ndev = priv->ndev; + struct mqnic_if *interface = cq_ring->interface; + struct device *dev = interface->dev; struct mqnic_ring *rx_ring = cq_ring->src_ring; + struct mqnic_priv *priv = rx_ring->priv; struct mqnic_rx_info *rx_info; struct mqnic_cpl *cpl; struct sk_buff *skb; @@ -326,7 +329,7 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) int budget = napi_budget; u32 len; - if (unlikely(!priv->port_up)) + if (unlikely(!priv || !priv->port_up)) return done; // process completion queue @@ -345,7 +348,7 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) page = rx_info->page; if (unlikely(!page)) { - dev_err(priv->dev, "%s: ring %d null page at index %d", + dev_err(dev, "%s: ring %d null page at index %d", __func__, cq_ring->index, ring_index); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, cpl, MQNIC_CPL_SIZE, true); @@ -354,31 +357,31 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) skb = napi_get_frags(&cq_ring->napi); if (unlikely(!skb)) { - dev_err(priv->dev, "%s: ring %d failed to 
allocate skb", + dev_err(dev, "%s: ring %d failed to allocate skb", __func__, cq_ring->index); break; } // RX hardware timestamp - if (priv->if_features & MQNIC_IF_FEATURE_PTP_TS) - skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(priv->mdev, rx_ring, cpl); + if (interface->if_features & MQNIC_IF_FEATURE_PTP_TS) + skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(interface->mdev, rx_ring, cpl); skb_record_rx_queue(skb, rx_ring->index); // RX hardware checksum - if (ndev->features & NETIF_F_RXCSUM) { + if (priv->ndev->features & NETIF_F_RXCSUM) { skb->csum = csum_unfold((__sum16) cpu_to_be16(le16_to_cpu(cpl->rx_csum))); skb->ip_summed = CHECKSUM_COMPLETE; } // unmap - dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr), + dma_unmap_page(dev, dma_unmap_addr(rx_info, dma_addr), dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE); rx_info->dma_addr = 0; len = min_t(u32, le16_to_cpu(cpl->len), rx_info->len); - dma_sync_single_range_for_cpu(priv->dev, rx_info->dma_addr, rx_info->page_offset, + dma_sync_single_range_for_cpu(dev, rx_info->dma_addr, rx_info->page_offset, rx_info->len, PCI_DMA_FROMDEVICE); __skb_fill_page_desc(skb, 0, page, rx_info->page_offset, len); @@ -433,12 +436,7 @@ int mqnic_process_rx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) void mqnic_rx_irq(struct mqnic_cq_ring *cq) { - struct mqnic_priv *priv = cq->priv; - - if (likely(priv->port_up)) - napi_schedule_irqoff(&cq->napi); - else - mqnic_arm_cq(cq); + napi_schedule_irqoff(&cq->napi); } int mqnic_poll_rx_cq(struct napi_struct *napi, int budget) diff --git a/modules/mqnic/mqnic_tx.c b/modules/mqnic/mqnic_tx.c index b4f999ca4..c7171fc0c 100644 --- a/modules/mqnic/mqnic_tx.c +++ b/modules/mqnic/mqnic_tx.c @@ -36,7 +36,7 @@ #include #include "mqnic.h" -int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, +int mqnic_create_tx_ring(struct mqnic_if *interface, struct mqnic_ring **ring_ptr, int index, u8 __iomem *hw_addr) { struct mqnic_ring *ring; @@ -45,9 +45,8 @@ int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, if (!ring) return -ENOMEM; - ring->dev = priv->dev; - ring->ndev = priv->ndev; - ring->priv = priv; + ring->dev = interface->dev; + ring->interface = interface; ring->index = index; ring->active = 0; @@ -148,13 +147,15 @@ void mqnic_free_tx_ring(struct mqnic_ring *ring) ring->tx_info = NULL; } -int mqnic_activate_tx_ring(struct mqnic_ring *ring, struct mqnic_cq_ring *cq_ring) +int mqnic_activate_tx_ring(struct mqnic_ring *ring, struct mqnic_priv *priv, + struct mqnic_cq_ring *cq_ring) { mqnic_deactivate_tx_ring(ring); - if (!ring->buf || !cq_ring || cq_ring->handler || cq_ring->src_ring) + if (!ring->buf || !priv || !cq_ring || cq_ring->handler || cq_ring->src_ring) return -EINVAL; + ring->priv = priv; ring->cq_ring = cq_ring; cq_ring->src_ring = ring; cq_ring->handler = mqnic_tx_irq; @@ -189,6 +190,7 @@ void mqnic_deactivate_tx_ring(struct mqnic_ring *ring) ring->cq_ring->handler = NULL; } + ring->priv = NULL; ring->cq_ring = NULL; ring->active = 0; @@ -256,8 +258,9 @@ int mqnic_free_tx_buf(struct mqnic_ring *ring) int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) { - struct mqnic_priv *priv = cq_ring->priv; + struct mqnic_if *interface = cq_ring->interface; struct mqnic_ring *tx_ring = cq_ring->src_ring; + struct mqnic_priv *priv = tx_ring->priv; struct mqnic_tx_info *tx_info; struct mqnic_cpl *cpl; struct skb_shared_hwtstamps hwts; @@ -270,7 +273,7 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) int 
done = 0; int budget = napi_budget; - if (unlikely(!priv->port_up)) + if (unlikely(!priv || !priv->port_up)) return done; // prefetch for BQL @@ -290,8 +293,8 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) // TX hardware timestamp if (unlikely(tx_info->ts_requested)) { - dev_info(priv->dev, "%s: TX TS requested", __func__); - hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, tx_ring, cpl); + dev_info(interface->dev, "%s: TX TS requested", __func__); + hwts.hwtstamp = mqnic_read_cpl_ts(interface->mdev, tx_ring, cpl); skb_tstamp_tx(tx_info->skb, &hwts); } // free TX descriptor @@ -342,12 +345,7 @@ int mqnic_process_tx_cq(struct mqnic_cq_ring *cq_ring, int napi_budget) void mqnic_tx_irq(struct mqnic_cq_ring *cq) { - struct mqnic_priv *priv = cq->priv; - - if (likely(priv->port_up)) - napi_schedule_irqoff(&cq->napi); - else - mqnic_arm_cq(cq); + napi_schedule_irqoff(&cq->napi); } int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
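
Note on the resulting object layout (an illustration, not part of the patch): the split above moves ring and port ownership out of the per-netdev mqnic_priv and into the new per-interface mqnic_if, while each net_device's private data keeps only counts and borrowed pointers copied from its parent interface. The userspace-only sketch below shows that ownership split using simplified, hypothetical stand-in types (sketch_if, sketch_priv, sketch_ring); it is not driver code and omits the hardware access, locking, and error unwinding that the real mqnic_create_interface() and mqnic_create_netdev() handle.

/*
 * Simplified sketch of the ownership split introduced by this patch.
 * The interface object creates and owns the rings; the netdev private
 * data only borrows pointers to them.  Names are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_RINGS 4

struct sketch_ring {
	int index;				/* ring index within the interface */
};

struct sketch_if {
	int index;				/* interface index within the device */
	int tx_queue_count;			/* rings are created and owned here */
	struct sketch_ring *tx_ring[MAX_RINGS];
};

struct sketch_priv {
	struct sketch_if *interface;		/* back-pointer, like priv->interface */
	int tx_queue_count;			/* copy of the interface's count */
	struct sketch_ring *tx_ring[MAX_RINGS];	/* borrowed, not owned */
};

static struct sketch_if *sketch_create_interface(int index, int tx_queues)
{
	struct sketch_if *interface = calloc(1, sizeof(*interface));
	int k;

	if (!interface)
		return NULL;

	interface->index = index;
	interface->tx_queue_count = tx_queues;

	/* the interface allocates the rings, as mqnic_create_interface() now does */
	for (k = 0; k < tx_queues; k++) {
		interface->tx_ring[k] = calloc(1, sizeof(*interface->tx_ring[k]));
		if (!interface->tx_ring[k])
			exit(EXIT_FAILURE);	/* error unwinding omitted in this sketch */
		interface->tx_ring[k]->index = k;
	}

	return interface;
}

static void sketch_attach_netdev(struct sketch_priv *priv, struct sketch_if *interface)
{
	int k;

	/* the netdev only copies counts and pointers, as mqnic_create_netdev() now does */
	priv->interface = interface;
	priv->tx_queue_count = interface->tx_queue_count;
	for (k = 0; k < interface->tx_queue_count; k++)
		priv->tx_ring[k] = interface->tx_ring[k];
}

int main(void)
{
	struct sketch_if *interface = sketch_create_interface(0, 2);
	struct sketch_priv priv = { 0 };
	int k;

	if (!interface)
		return 1;

	sketch_attach_netdev(&priv, interface);

	for (k = 0; k < priv.tx_queue_count; k++)
		printf("netdev uses interface %d TX ring %d\n",
		       priv.interface->index, priv.tx_ring[k]->index);

	/* teardown: the interface, not the netdev, frees the ring objects */
	for (k = 0; k < interface->tx_queue_count; k++)
		free(interface->tx_ring[k]);
	free(interface);

	return 0;
}

The practical effect, visible in the diff, is that ring and port objects now outlive any one net_device: mqnic_destroy_netdev() only frees the ring buffers it allocated (mqnic_free_*_ring), while mqnic_destroy_interface() destroys the ring and port objects themselves along with the attached net_devices.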