mirror of https://github.com/corundum/corundum.git
synced 2025-01-30 08:32:52 +08:00

More kernel module coding style updates

parent 7ac4797336
commit 2adaf820b5
@@ -218,7 +218,7 @@ struct mqnic_cq_ring {
 	int ring_index;
 	int eq_index;
 
-	void (*handler)(struct mqnic_cq_ring *);
+	void (*handler)(struct mqnic_cq_ring *ring);
 
 	u32 hw_ptr_mask;
 	u8 __iomem *hw_addr;
@@ -244,7 +244,7 @@ struct mqnic_eq_ring {
 
 	int irq;
 
-	void (*handler)(struct mqnic_eq_ring *);
+	void (*handler)(struct mqnic_eq_ring *ring);
 
 	u32 hw_ptr_mask;
 	u8 __iomem *hw_addr;
@@ -327,7 +327,7 @@ void mqnic_destroy_netdev(struct net_device *ndev);
 
 // mqnic_port.c
 int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
-                      int index, u8 __iomem *hw_addr);
+		int index, u8 __iomem *hw_addr);
 void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr);
 int mqnic_activate_port(struct mqnic_port *port);
 void mqnic_deactivate_port(struct mqnic_port *port);
@@ -342,7 +342,7 @@ void mqnic_port_set_rx_mtu(struct mqnic_port *port, u32 mtu);
 void mqnic_register_phc(struct mqnic_dev *mdev);
 void mqnic_unregister_phc(struct mqnic_dev *mdev);
 ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
-                          const struct mqnic_cpl *cpl);
+		const struct mqnic_cpl *cpl);
 
 // mqnic_i2c.c
 struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *reg);
@@ -358,10 +358,10 @@ void mqnic_board_deinit(struct mqnic_dev *mqnic);
 
 // mqnic_eq.c
 int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr);
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr);
 int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
-                           int int_index);
+		int int_index);
 void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring);
@@ -372,10 +372,10 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring);
 
 // mqnic_cq.c
 int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr);
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr);
 int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
-                           int eq_index);
+		int eq_index);
 void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring);
@@ -385,43 +385,43 @@ void mqnic_arm_cq(struct mqnic_cq_ring *ring);
 
 // mqnic_tx.c
 int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr);
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
 int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                           int cpl_index);
+		int cpl_index);
 void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring);
 void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_tx_write_head_ptr(struct mqnic_ring *ring);
 void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                        int index, int napi_budget);
+		int index, int napi_budget);
 int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
 int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-                        int napi_budget);
+		int napi_budget);
 void mqnic_tx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_tx_cq(struct napi_struct *napi, int budget);
 netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);
 
 // mqnic_rx.c
 int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr);
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
 int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                           int cpl_index);
+		int cpl_index);
 void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring);
 void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_rx_write_head_ptr(struct mqnic_ring *ring);
 void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                        int index);
+		int index);
 int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
 int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                          int index);
+		int index);
 void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring);
 int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-                        int napi_budget);
+		int napi_budget);
 void mqnic_rx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_rx_cq(struct napi_struct *napi, int budget);
 
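The two handler hunks above add a parameter name to the callback prototypes. Kernel style favors named parameters in prototypes and function-pointer members, so the declaration documents what the callback receives. A minimal sketch of the pattern, with a hypothetical my_ring type standing in for the mqnic ring structs:

	struct my_ring {
		int index;
		/* named parameter documents the argument at the declaration site */
		void (*handler)(struct my_ring *ring);
	};

	static void my_ring_irq(struct my_ring *ring)
	{
		/* service completions for this ring */
	}

	static void my_ring_setup(struct my_ring *ring)
	{
		ring->handler = my_ring_irq;
		if (ring->handler)
			ring->handler(ring);
	}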
@@ -453,9 +453,12 @@ static void mqnic_alveo_bmc_reg_write(struct mqnic_dev *mqnic, u32 reg, u32 val)
 static int mqnic_alveo_bmc_read_mac(struct mqnic_dev *mqnic, int index, char *mac)
 {
 	uint32_t reg = 0x0281a0 + index * 8;
-	uint32_t val = mqnic_alveo_bmc_reg_read(mqnic, reg);
+	uint32_t val;
+
+	val = mqnic_alveo_bmc_reg_read(mqnic, reg);
 	mac[0] = (val >> 8) & 0xff;
 	mac[1] = val & 0xff;
 
 	val = mqnic_alveo_bmc_reg_read(mqnic, reg + 4);
 	mac[2] = (val >> 24) & 0xff;
 	mac[3] = (val >> 16) & 0xff;
@@ -581,18 +584,18 @@ static int mqnic_gecko_bmc_read(struct mqnic_dev *mqnic)
 			if (val & BIT(18)) {
 				// timed out
 				dev_warn(mqnic->dev, "Timed out waiting for Gecko BMC response");
-				msleep(10);
+				msleep(20);
 				return -2;
 			}
 			return val & 0xffff;
-		} else {
-			timeout--;
-			if (timeout == 0) {
-				dev_warn(mqnic->dev, "Timed out waiting for Gecko BMC interface");
-				return -1;
-			}
-			msleep(1);
 		}
+
+		timeout--;
+		if (timeout == 0) {
+			dev_warn(mqnic->dev, "Timed out waiting for Gecko BMC interface");
+			return -1;
+		}
+		usleep_range(1000, 100000);
 	}
 
 	return -1;
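Two timing changes land in this hunk: the post-timeout settle delay grows from msleep(10) to msleep(20), and the 1 ms poll interval becomes usleep_range(1000, 100000). This tracks the kernel's timer guidance (Documentation/timers/timers-howto): msleep() is jiffy-based and can oversleep badly for requests under roughly 20 ms, while usleep_range() uses hrtimers and lets the scheduler coalesce wakeups anywhere in the given window. A sketch of the polling idiom, with a hypothetical bmc_ready() check standing in for the register read:

	#include <linux/delay.h>
	#include <linux/errno.h>

	static int wait_for_bmc(struct mqnic_dev *mqnic, int timeout)
	{
		while (timeout--) {
			if (bmc_ready(mqnic))	/* hypothetical ready check */
				return 0;
			/* sleep at least 1 ms; the wide upper bound lets wakeups batch */
			usleep_range(1000, 100000);
		}
		return -ETIMEDOUT;
	}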
@@ -601,6 +604,7 @@ static int mqnic_gecko_bmc_read(struct mqnic_dev *mqnic)
 static int mqnic_gecko_bmc_write(struct mqnic_dev *mqnic, u16 cmd, u32 data)
 {
 	int ret;
+
 	ret = mqnic_gecko_bmc_read(mqnic);
 
 	if (ret == -1)
@@ -627,9 +631,10 @@ static int mqnic_gecko_bmc_query(struct mqnic_dev *mqnic, u16 cmd, u32 data)
 static int mqnic_gecko_bmc_read_mac(struct mqnic_dev *mqnic, int index, char *mac)
 {
 	int i;
+	u16 val;
 
 	for (i = 0; i < ETH_ALEN; i += 2) {
-		u16 val = mqnic_gecko_bmc_query(mqnic, 0x2003, 0 + index * ETH_ALEN + i);
+		val = mqnic_gecko_bmc_query(mqnic, 0x2003, 0 + index * ETH_ALEN + i);
 		if (val < 0)
 			return val;
 		mac[i] = val & 0xff;
@@ -711,7 +716,7 @@ static int mqnic_gecko_board_init(struct mqnic_dev *mqnic)
 		uint16_t v_h = mqnic_gecko_bmc_query(mqnic, 0x7006, 0);
 
 		dev_info(mqnic->dev, "Gecko BMC version %d.%d.%d.%d",
-				 (v_h >> 8) & 0xff, v_h & 0xff, (v_l >> 8) & 0xff, v_l & 0xff);
+				(v_h >> 8) & 0xff, v_h & 0xff, (v_l >> 8) & 0xff, v_l & 0xff);
 
 		mqnic_gecko_bmc_read_mac_list(mqnic, 8);
 	}
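The declaration hunks in this file hoist variables to the top of the function: val in mqnic_alveo_bmc_read_mac loses its side-effecting initializer, and the u16 val in mqnic_gecko_bmc_read_mac moves out of the loop body, each followed by a blank line before the first statement, which is the layout checkpatch.pl expects. The resulting shape, condensed from the Alveo hunk:

	static int read_mac(struct mqnic_dev *mqnic, int index, char *mac)
	{
		uint32_t reg = 0x0281a0 + index * 8;	/* cheap, pure initializer stays */
		uint32_t val;				/* function call moved out of the declaration */

		val = mqnic_alveo_bmc_reg_read(mqnic, reg);
		mac[0] = (val >> 8) & 0xff;
		mac[1] = val & 0xff;
		return 0;
	}

Note that the Gecko variant keeps val as u16, so the if (val < 0) check following the query can never fire; the commit does not change that behavior.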
@@ -34,7 +34,7 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 
 int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr)
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_cq_ring *ring;
@@ -54,7 +54,7 @@ int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_pt
 
 	ring->buf_size = ring->size * ring->stride;
 	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
-	                               &ring->buf_dma_addr, GFP_KERNEL);
+			&ring->buf_dma_addr, GFP_KERNEL);
 	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate CQ ring DMA buffer");
 		ret = -ENOMEM;
@@ -119,7 +119,7 @@ int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
 	iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK,
-	          ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	return 0;
 }
@@ -155,5 +155,5 @@ void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring)
 void mqnic_arm_cq(struct mqnic_cq_ring *ring)
 {
 	iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK,
-	          ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
+			ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
 }
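Most of the remaining churn in this commit is whitespace: in the -/+ pairs that read identically, only the leading whitespace of the wrapped line changes. The target style indents continuation lines with tabs rather than aligning them under the opening parenthesis; roughly (exact tab counts are an assumption, since indentation did not survive in this capture):

	/* before: continuation aligned under the opening parenthesis */
	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
	                               &ring->buf_dma_addr, GFP_KERNEL);

	/* after: continuation indented with tabs */
	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
			&ring->buf_dma_addr, GFP_KERNEL);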
@@ -59,18 +59,18 @@ static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *v
 
 	if (map_size > mqnic->hw_regs_size) {
 		dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)",
-		        vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
+				vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
 		return -EINVAL;
 	}
 
 	ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT,
-	                      map_size, pgprot_noncached(vma->vm_page_prot));
+			map_size, pgprot_noncached(vma->vm_page_prot));
 
 	if (ret)
 		dev_err(mqnic->dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
 	else
 		dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p",
-		        &mqnic->hw_regs_phys, (void *)vma->vm_start);
+				&mqnic->hw_regs_phys, (void *)vma->vm_start);
 
 	return ret;
 }
@@ -84,7 +84,7 @@ static int mqnic_mmap(struct file *file, struct vm_area_struct *vma)
 		return mqnic_map_registers(mqnic, vma);
 
 	dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu",
-	        vma->vm_pgoff);
+			vma->vm_pgoff);
 	return -EINVAL;
 }
 
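The mmap path above exposes the NIC register BAR directly to userspace. Condensed to its essentials (names as in the hunk, error logging trimmed), the mechanism is a bounds check followed by remap_pfn_range() with an uncached protection:

	static int map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *vma)
	{
		size_t map_size = vma->vm_end - vma->vm_start;

		if (map_size > mqnic->hw_regs_size)
			return -EINVAL;

		/* device registers must not be mapped cacheable */
		return remap_pfn_range(vma, vma->vm_start,
				mqnic->hw_regs_phys >> PAGE_SHIFT,
				map_size, pgprot_noncached(vma->vm_page_prot));
	}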
@@ -34,7 +34,7 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 
 int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr)
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_eq_ring *ring;
@@ -54,7 +54,7 @@ int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_pt
 
 	ring->buf_size = ring->size * ring->stride;
 	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
-	                               &ring->buf_dma_addr, GFP_KERNEL);
+			&ring->buf_dma_addr, GFP_KERNEL);
 	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate EQ ring DMA buffer");
 		ret = -ENOMEM;
@@ -78,9 +78,9 @@ int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_pt
 	iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 	// set pointers
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask,
-	          ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
+			ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
-	          ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
+			ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
 	// set size
 	iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 
@@ -106,7 +106,7 @@ void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_
 }
 
 int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
-                           int int_index)
+		int int_index)
 {
 	ring->int_index = int_index;
 
@@ -119,12 +119,12 @@ int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
 	iowrite32(int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 	// set pointers
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask,
-	          ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
+			ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
-	          ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
+			ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
 	iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK,
-	          ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	return 0;
 }
@@ -160,13 +160,14 @@ void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring)
 void mqnic_arm_eq(struct mqnic_eq_ring *ring)
 {
 	iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK,
-	          ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
+			ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 }
 
 void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_event *event;
+	struct mqnic_cq_ring *cq_ring;
 	u32 eq_index;
 	u32 eq_tail_ptr;
 	int done = 0;
@@ -187,13 +188,12 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
 			// transmit completion event
 			if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) {
 				dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
-				        priv->port, le16_to_cpu(event->source), eq_index,
-				        le16_to_cpu(event->type));
+						priv->port, le16_to_cpu(event->source), eq_index,
+						le16_to_cpu(event->type));
 				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
-				               event, MQNIC_EVENT_SIZE, true);
+						event, MQNIC_EVENT_SIZE, true);
 			} else {
-				struct mqnic_cq_ring *cq_ring =
-					priv->tx_cpl_ring[le16_to_cpu(event->source)];
+				cq_ring = priv->tx_cpl_ring[le16_to_cpu(event->source)];
 				if (likely(cq_ring && cq_ring->handler))
 					cq_ring->handler(cq_ring);
 			}
@@ -201,22 +201,21 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
 			// receive completion event
 			if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) {
 				dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
-				        priv->port, le16_to_cpu(event->source), eq_index,
-				        le16_to_cpu(event->type));
+						priv->port, le16_to_cpu(event->source), eq_index,
+						le16_to_cpu(event->type));
 				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
-				               event, MQNIC_EVENT_SIZE, true);
+						event, MQNIC_EVENT_SIZE, true);
 			} else {
-				struct mqnic_cq_ring *cq_ring =
-					priv->rx_cpl_ring[le16_to_cpu(event->source)];
+				cq_ring = priv->rx_cpl_ring[le16_to_cpu(event->source)];
 				if (likely(cq_ring && cq_ring->handler))
 					cq_ring->handler(cq_ring);
 			}
 		} else {
 			dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)",
-			        priv->port, le16_to_cpu(event->type), eq_index,
-			        le16_to_cpu(event->source));
+					priv->port, le16_to_cpu(event->type), eq_index,
+					le16_to_cpu(event->source));
 			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
-			               event, MQNIC_EVENT_SIZE, true);
+					event, MQNIC_EVENT_SIZE, true);
 		}
 
 		done++;
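mqnic_process_eq previously declared a cq_ring pointer inside each else branch; the commit declares it once at function scope and assigns per branch, leaving each branch as a bounds check, a table lookup, and an indirect call through the handler installed in mqnic_start_port. The dispatch shape, reduced to a sketch of the transmit case (the source temporary is for brevity; the driver calls le16_to_cpu() at each use):

	u16 source = le16_to_cpu(event->source);

	if (unlikely(source > priv->tx_cpl_queue_count)) {
		/* log and hex-dump the unexpected event */
	} else {
		cq_ring = priv->tx_cpl_ring[source];
		if (likely(cq_ring && cq_ring->handler))
			cq_ring->handler(cq_ring);	/* mqnic_tx_irq */
	}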
@@ -50,7 +50,7 @@ static void mqnic_get_drvinfo(struct net_device *ndev,
 	strlcpy(drvinfo->version, DRIVER_VERSION, sizeof(drvinfo->version));
 
 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d",
-	         mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
+			mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
 	strlcpy(drvinfo->bus_info, dev_name(mdev->dev), sizeof(drvinfo->bus_info));
 }
 
@@ -79,7 +79,7 @@ static int mqnic_get_ts_info(struct net_device *ndev,
 }
 
 static int mqnic_read_module_eeprom(struct net_device *ndev,
-                                    u16 offset, u16 len, u8 * data)
+		u16 offset, u16 len, u8 *data)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 
@@ -138,7 +138,7 @@ static int mqnic_get_module_info(struct net_device *ndev,
 }
 
 static int mqnic_get_module_eeprom(struct net_device *ndev,
-                                   struct ethtool_eeprom *eeprom, u8 * data)
+		struct ethtool_eeprom *eeprom, u8 *data)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	int i = 0;
@@ -151,7 +151,7 @@ static int mqnic_get_module_eeprom(struct net_device *ndev,
 
 	while (i < eeprom->len) {
 		read_len = mqnic_read_module_eeprom(ndev, eeprom->offset + i,
-		                                    eeprom->len - i, data + i);
+				eeprom->len - i, data + i);
 
 		if (read_len == 0)
 			return -EIO;
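The two prototype hunks above also fix pointer spacing: u8 * data becomes u8 *data. Kernel style binds the asterisk to the identifier, not the type name. Illustrated with read_eeprom as a shortened stand-in for mqnic_read_module_eeprom:

	/* preferred: asterisk adjacent to the parameter name */
	static int read_eeprom(struct net_device *ndev, u16 offset, u16 len, u8 *data);

	/* flagged by checkpatch: space on both sides of the asterisk */
	static int read_eeprom(struct net_device *ndev, u16 offset, u16 len, u8 * data);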
@@ -95,7 +95,7 @@ struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *
 	// bit-bang algorithm setup
 	algo = &bus->algo;
 	algo->udelay = 5;
-	algo->timeout = usecs_to_jiffies(2000);;
+	algo->timeout = usecs_to_jiffies(2000);
 	algo->setsda = mqnic_i2c_set_sda;
 	algo->setscl = mqnic_i2c_set_scl;
 	algo->getsda = mqnic_i2c_get_sda;
@@ -108,7 +108,7 @@ struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *
 	adapter->algo_data = algo;
 	adapter->dev.parent = mqnic->dev;
 	snprintf(adapter->name, sizeof(adapter->name), "%s I2C%d", mqnic->name,
-	         mqnic->i2c_adapter_count);
+			mqnic->i2c_adapter_count);
 
 	if (i2c_bit_add_bus(adapter)) {
 		dev_err(mqnic->dev, "Failed to register I2C adapter");
@@ -110,6 +110,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 {
 	int ret = 0;
 	struct mqnic_dev *mqnic;
+	struct mqnic_priv *priv;
 	struct device *dev = &pdev->dev;
 
 	int k = 0;
@@ -119,30 +120,32 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	dev_info(dev, " Device: 0x%04x", pdev->device);
 	dev_info(dev, " Class: 0x%06x", pdev->class);
 	dev_info(dev, " PCI ID: %04x:%02x:%02x.%d", pci_domain_nr(pdev->bus),
-	         pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+			pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 	if (pdev->pcie_cap) {
 		u16 devctl;
 		u32 lnkcap;
 		u16 lnksta;
+
 		pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
 		pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &lnkcap);
 		pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);
 
 		dev_info(dev, " Max payload size: %d bytes",
-		         128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5));
+				128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5));
 		dev_info(dev, " Max read request size: %d bytes",
-		         128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
+				128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
 		dev_info(dev, " Link capability: gen %d x%d",
-		         lnkcap & PCI_EXP_LNKCAP_SLS, (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
+				lnkcap & PCI_EXP_LNKCAP_SLS, (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
 		dev_info(dev, " Link status: gen %d x%d",
-		         lnksta & PCI_EXP_LNKSTA_CLS, (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
+				lnksta & PCI_EXP_LNKSTA_CLS, (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
 		dev_info(dev, " Relaxed ordering: %s",
-		         devctl & PCI_EXP_DEVCTL_RELAX_EN ? "enabled" : "disabled");
+				devctl & PCI_EXP_DEVCTL_RELAX_EN ? "enabled" : "disabled");
 		dev_info(dev, " Phantom functions: %s",
-		         devctl & PCI_EXP_DEVCTL_PHANTOM ? "enabled" : "disabled");
+				devctl & PCI_EXP_DEVCTL_PHANTOM ? "enabled" : "disabled");
 		dev_info(dev, " Extended tags: %s",
-		         devctl & PCI_EXP_DEVCTL_EXT_TAG ? "enabled" : "disabled");
+				devctl & PCI_EXP_DEVCTL_EXT_TAG ? "enabled" : "disabled");
 		dev_info(dev, " No snoop: %s",
-		         devctl & PCI_EXP_DEVCTL_NOSNOOP_EN ? "enabled" : "disabled");
+				devctl & PCI_EXP_DEVCTL_NOSNOOP_EN ? "enabled" : "disabled");
 	}
 #ifdef CONFIG_NUMA
 	dev_info(dev, " NUMA node: %d", pdev->dev.numa_node);
@@ -171,7 +174,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 
 	// Disable ASPM
 	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
-	                       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
 
 	// Enable device
 	ret = pci_enable_device_mem(pdev);
@@ -273,7 +276,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	if (mqnic->if_count * mqnic->if_stride > mqnic->hw_regs_size) {
 		ret = -EIO;
 		dev_err(dev, "Invalid BAR configuration (%d IF * 0x%x > 0x%llx)",
-		        mqnic->if_count, mqnic->if_stride, mqnic->hw_regs_size);
+				mqnic->if_count, mqnic->if_stride, mqnic->hw_regs_size);
 		goto fail_map_bars;
 	}
 
@@ -288,7 +291,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	// Set up interrupts
 	for (k = 0; k < mqnic->irq_count; k++) {
 		ret = pci_request_irq(pdev, k, mqnic_interrupt, NULL,
-		                      mqnic, "%s-%d", mqnic->name, k);
+				mqnic, "%s-%d", mqnic->name, k);
 		if (ret < 0) {
 			dev_err(dev, "Failed to request IRQ");
 			goto fail_irq;
@@ -326,7 +329,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 
 	// pass module I2C clients to net_device instances
 	for (k = 0; k < mqnic->if_count; k++) {
-		struct mqnic_priv *priv = netdev_priv(mqnic->ndev[k]);
+		priv = netdev_priv(mqnic->ndev[k]);
		priv->mod_i2c_client = mqnic->mod_i2c_client[k];
 	}
 
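The probe hunk prints the negotiated PCIe parameters straight from config space. The decoding is standard: payload and read-request sizes are 128 << field, and link generation and width come from masking LNKSTA. Trimmed from the hunk above (assuming pdev->pcie_cap is valid, as the surrounding if checks):

	u16 devctl, lnksta;

	pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);

	dev_info(dev, "MPS %d, MRRS %d, link gen %d x%d",
			128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5),
			128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12),
			lnksta & PCI_EXP_LNKSTA_CLS,
			(lnksta & PCI_EXP_LNKSTA_NLW) >> 4);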
@@ -55,7 +55,7 @@ static int mqnic_start_port(struct net_device *ndev)
 		priv->rx_cpl_ring[k]->handler = mqnic_rx_irq;
 
 		netif_napi_add(ndev, &priv->rx_cpl_ring[k]->napi,
-		               mqnic_poll_rx_cq, NAPI_POLL_WEIGHT);
+				mqnic_poll_rx_cq, NAPI_POLL_WEIGHT);
 		napi_enable(&priv->rx_cpl_ring[k]->napi);
 
 		mqnic_arm_cq(priv->rx_cpl_ring[k]);
@@ -78,7 +78,7 @@ static int mqnic_start_port(struct net_device *ndev)
 		priv->tx_cpl_ring[k]->handler = mqnic_tx_irq;
 
 		netif_tx_napi_add(ndev, &priv->tx_cpl_ring[k]->napi,
-		                  mqnic_poll_tx_cq, NAPI_POLL_WEIGHT);
+				mqnic_poll_tx_cq, NAPI_POLL_WEIGHT);
 		napi_enable(&priv->tx_cpl_ring[k]->napi);
 
 		mqnic_arm_cq(priv->tx_cpl_ring[k]);
@@ -164,7 +164,7 @@ static int mqnic_stop_port(struct net_device *ndev)
 	for (k = 0; k < priv->event_queue_count; k++)
 		mqnic_deactivate_eq_ring(priv, priv->event_ring[k]);
 
-	msleep(10);
+	msleep(20);
 
 	// free descriptors in TX queues
 	for (k = 0; k < priv->tx_queue_count; k++)
@@ -245,7 +245,7 @@ void mqnic_update_stats(struct net_device *ndev)
 }
 
 static void mqnic_get_stats64(struct net_device *ndev,
-                              struct rtnl_link_stats64 *stats)
+		struct rtnl_link_stats64 *stats)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 
@@ -302,7 +302,7 @@ static int mqnic_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
 
 	if (copy_to_user(ifr->ifr_data, &hwts_config, sizeof(hwts_config)))
 		return -EFAULT;
-
+
 	return 0;
 }
 
@@ -480,42 +480,42 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	// allocate rings
 	for (k = 0; k < priv->event_queue_count; k++) {
 		ret = mqnic_create_eq_ring(priv, &priv->event_ring[k], 1024, MQNIC_EVENT_SIZE, k,
-		                           hw_addr + priv->event_queue_offset + k * MQNIC_EVENT_QUEUE_STRIDE); // TODO configure/constant
+				hw_addr + priv->event_queue_offset + k * MQNIC_EVENT_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
 			goto fail;
 	}
 
 	for (k = 0; k < priv->tx_queue_count; k++) {
 		ret = mqnic_create_tx_ring(priv, &priv->tx_ring[k], 1024, MQNIC_DESC_SIZE * desc_block_size, k,
-		                           hw_addr + priv->tx_queue_offset + k * MQNIC_QUEUE_STRIDE); // TODO configure/constant
+				hw_addr + priv->tx_queue_offset + k * MQNIC_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
 			goto fail;
 	}
 
 	for (k = 0; k < priv->tx_cpl_queue_count; k++) {
 		ret = mqnic_create_cq_ring(priv, &priv->tx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k,
-		                           hw_addr + priv->tx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
+				hw_addr + priv->tx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
 			goto fail;
 	}
 
 	for (k = 0; k < priv->rx_queue_count; k++) {
 		ret = mqnic_create_rx_ring(priv, &priv->rx_ring[k], 1024, MQNIC_DESC_SIZE, k,
-		                           hw_addr + priv->rx_queue_offset + k * MQNIC_QUEUE_STRIDE); // TODO configure/constant
+				hw_addr + priv->rx_queue_offset + k * MQNIC_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
 			goto fail;
 	}
 
 	for (k = 0; k < priv->rx_cpl_queue_count; k++) {
 		ret = mqnic_create_cq_ring(priv, &priv->rx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k,
-		                           hw_addr + priv->rx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
+				hw_addr + priv->rx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
 			goto fail;
 	}
 
 	for (k = 0; k < priv->port_count; k++) {
 		ret = mqnic_create_port(priv, &priv->ports[k], k,
-		                        hw_addr + priv->port_offset + k * priv->port_stride);
+				hw_addr + priv->port_offset + k * priv->port_stride);
 		if (ret)
 			goto fail;
@@ -34,7 +34,7 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 
 int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
-                      int index, u8 __iomem *hw_addr)
+		int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_port *port;
@@ -35,7 +35,7 @@ either expressed or implied, of The Regents of the University of California.
 #include <linux/version.h>
 
 ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
-                          const struct mqnic_cpl *cpl)
+		const struct mqnic_cpl *cpl)
 {
 	u64 ts_s = le16_to_cpu(cpl->ts_s);
 	u32 ts_ns = le32_to_cpu(cpl->ts_ns);
@@ -101,7 +101,7 @@ static int mqnic_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
 static int mqnic_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
-                              struct ptp_system_timestamp *sts)
+		struct ptp_system_timestamp *sts)
 {
 	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
 
@@ -34,7 +34,7 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 
 int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr)
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_ring *ring;
@@ -63,7 +63,7 @@ int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 
 	ring->buf_size = ring->size * ring->stride;
 	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
-	                               &ring->buf_dma_addr, GFP_KERNEL);
+			&ring->buf_dma_addr, GFP_KERNEL);
 	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate RX ring DMA buffer");
 		ret = -ENOMEM;
@@ -91,7 +91,7 @@ int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size
 	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
-	          ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	*ring_ptr = ring;
 	return 0;
@@ -122,7 +122,7 @@ void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr
 }
 
 int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                           int cpl_index)
+		int cpl_index)
 {
 	// deactivate queue
 	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -136,7 +136,7 @@ int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
 	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
-	          ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	mqnic_refill_rx_buffers(priv, ring);
 
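Ring activation here is a single register write that packs log2 of the ring size in the low byte, log2 of the descriptor block size shifted into the next byte, and an active bit, so size and enable state change together; deactivation (next hunk) rewrites the same register without the active mask. As used in the hunk above:

	/* log2(size) | log2(desc block) << 8 | active bit, in one write */
	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) |
			MQNIC_QUEUE_ACTIVE_MASK,
			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);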
@@ -147,7 +147,7 @@ void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
 {
 	// deactivate queue
 	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
-	          ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 }
 
 bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring)
@@ -171,13 +171,13 @@ void mqnic_rx_write_head_ptr(struct mqnic_ring *ring)
 }
 
 void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                        int index)
+		int index)
 {
 	struct mqnic_rx_info *rx_info = &ring->rx_info[index];
 	struct page *page = rx_info->page;
 
 	dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
-	               dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
+			dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
 	rx_info->dma_addr = 0;
 	__free_pages(page, rx_info->page_order);
 	rx_info->page = NULL;
@@ -203,7 +203,7 @@ int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
 }
 
 int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                          int index)
+		int index)
 {
 	struct mqnic_rx_info *rx_info = &ring->rx_info[index];
 	struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);
@@ -214,14 +214,14 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
 
 	if (unlikely(page)) {
 		dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d",
-		        priv->port);
+				priv->port);
 		return -1;
 	}
 
 	page = dev_alloc_pages(page_order);
 	if (unlikely(!page)) {
 		dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d",
-		        priv->port);
+				priv->port);
 		return -1;
 	}
 
@@ -230,7 +230,7 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
 
 	if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
 		dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d",
-		        priv->port);
+				priv->port);
 		__free_pages(page, page_order);
 		return -1;
 	}
@@ -268,7 +268,7 @@ void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
 }
 
 int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-                        int napi_budget)
+		int napi_budget)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
@@ -304,16 +304,16 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 
 		if (unlikely(!page)) {
 			dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d",
-			        cq_ring->ring_index, ring_index);
+					cq_ring->ring_index, ring_index);
 			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
-			               cpl, MQNIC_CPL_SIZE, true);
+					cpl, MQNIC_CPL_SIZE, true);
 			break;
 		}
 
 		skb = napi_get_frags(&cq_ring->napi);
 		if (unlikely(!skb)) {
 			dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb",
-			        cq_ring->ring_index);
+					cq_ring->ring_index);
 			break;
 		}
 
@@ -331,13 +331,13 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 
 		// unmap
 		dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
-		               dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
+				dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
 		rx_info->dma_addr = 0;
 
 		len = min_t(u32, le16_to_cpu(cpl->len), rx_info->len);
 
 		dma_sync_single_range_for_cpu(priv->dev, rx_info->dma_addr, rx_info->page_offset,
-		                              rx_info->len, PCI_DMA_FROMDEVICE);
+				rx_info->len, PCI_DMA_FROMDEVICE);
 
 		__skb_fill_page_desc(skb, 0, page, rx_info->page_offset, len);
 		rx_info->page = NULL;
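mqnic_prepare_rx_desc posts a freshly allocated, DMA-mapped page per descriptor; mqnic_process_rx_cq later unmaps it and attaches it to an skb frag. A compressed sketch of the allocate-and-map half, following the calls visible in the hunks (the map step itself falls outside the excerpt and is assumed to be dma_map_page(), which pairs with the dma_unmap_page() above):

	page = dev_alloc_pages(page_order);
	if (unlikely(!page))
		return -1;

	dma_addr = dma_map_page(priv->dev, page, 0,
			PAGE_SIZE << page_order, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
		__free_pages(page, page_order);
		return -1;
	}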
@@ -35,7 +35,7 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 
 int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
-                         int size, int stride, int index, u8 __iomem *hw_addr)
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_ring *ring;
@@ -65,7 +65,7 @@ int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 
 	ring->buf_size = ring->size * ring->stride;
 	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
-	                               &ring->buf_dma_addr, GFP_KERNEL);
+			&ring->buf_dma_addr, GFP_KERNEL);
 	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate TX ring DMA buffer");
 		ret = -ENOMEM;
@@ -93,7 +93,7 @@ int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size
 	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
-	          ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	*ring_ptr = ring;
 	return 0;
@@ -124,7 +124,7 @@ void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr
 }
 
 int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                           int cpl_index)
+		int cpl_index)
 {
 	// deactivate queue
 	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -138,7 +138,7 @@ int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
 	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
-	          ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	return 0;
 }
@@ -147,7 +147,7 @@ void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
 {
 	// deactivate queue
 	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
-	          ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 }
 
 bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring)
@@ -171,7 +171,7 @@ void mqnic_tx_write_head_ptr(struct mqnic_ring *ring)
 }
 
 void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                        int index, int napi_budget)
+		int index, int napi_budget)
 {
 	struct mqnic_tx_info *tx_info = &ring->tx_info[index];
 	struct sk_buff *skb = tx_info->skb;
@@ -180,13 +180,13 @@ void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
 	prefetchw(&skb->users);
 
 	dma_unmap_single(priv->dev, dma_unmap_addr(tx_info, dma_addr),
-	                 dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
+			dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
 	dma_unmap_addr_set(tx_info, dma_addr, 0);
 
 	// unmap frags
 	for (i = 0; i < tx_info->frag_count; i++)
 		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
-		               tx_info->frags[i].len, PCI_DMA_TODEVICE);
+				tx_info->frags[i].len, PCI_DMA_TODEVICE);
 
 	napi_consume_skb(skb, napi_budget);
 	tx_info->skb = NULL;
@@ -212,12 +212,13 @@ int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
 }
 
 int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
-                        int napi_budget)
+		int napi_budget)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_ring *ring = priv->tx_ring[cq_ring->ring_index];
 	struct mqnic_tx_info *tx_info;
 	struct mqnic_cpl *cpl;
+	struct skb_shared_hwtstamps hwts;
 	u32 cq_index;
 	u32 cq_tail_ptr;
 	u32 ring_index;
@@ -247,7 +248,6 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 
 		// TX hardware timestamp
 		if (unlikely(tx_info->ts_requested)) {
-			struct skb_shared_hwtstamps hwts;
 			dev_info(priv->dev, "mqnic_process_tx_cq TX TS requested");
 			hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
 			skb_tstamp_tx(tx_info->skb, &hwts);
@@ -327,10 +327,11 @@ int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
 }
 
 static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
-                          struct mqnic_tx_info *tx_info,
-                          struct mqnic_desc *tx_desc, struct sk_buff *skb)
+		struct mqnic_tx_info *tx_info, struct mqnic_desc *tx_desc,
+		struct sk_buff *skb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	const skb_frag_t *frag;
 	u32 i;
 	u32 len;
 	dma_addr_t dma_addr;
@@ -340,7 +341,7 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
 	tx_info->frag_count = 0;
 
 	for (i = 0; i < shinfo->nr_frags; i++) {
-		const skb_frag_t *frag = &shinfo->frags[i];
+		frag = &shinfo->frags[i];
 		len = skb_frag_size(frag);
 		dma_addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
@@ -386,7 +387,7 @@ map_error:
 	// unmap frags
 	for (i = 0; i < tx_info->frag_count; i++)
 		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
-		               tx_info->frags[i].len, PCI_DMA_TODEVICE);
+				tx_info->frags[i].len, PCI_DMA_TODEVICE);
 
 	// update tx_info
 	tx_info->skb = NULL;
@@ -444,7 +445,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	if (csum_start > 255 || csum_offset > 127) {
 		dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d",
-		         csum_start, csum_offset);
+				csum_start, csum_offset);
 
 		// offset out of range, fall back on software checksum
 		if (skb_checksum_help(skb)) {
@@ -482,7 +483,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	stop_queue = mqnic_is_tx_ring_full(ring);
 	if (unlikely(stop_queue)) {
 		dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d",
-		         ring_index, priv->port);
+				ring_index, priv->port);
 		netif_tx_stop_queue(ring->tx_queue);
 	}
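mqnic_map_skb walks skb_shinfo(skb)->frags and DMA-maps each fragment before filling descriptors; the map_error hunk unwinds every fragment mapped so far. The loop shape, per the fragments shown (the tx_info bookkeeping lines are inferred from the matching unmap code):

	for (i = 0; i < shinfo->nr_frags; i++) {
		frag = &shinfo->frags[i];
		len = skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
			goto map_error;
		/* record for unmap on completion or on error unwind */
		tx_info->frags[i].len = len;
		tx_info->frags[i].dma_addr = dma_addr;
		tx_info->frag_count = i + 1;
	}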