mirror of https://github.com/corundum/corundum.git synced 2025-01-30 08:32:52 +08:00

Fix kernel module coding style

Alex Forencich 2021-10-08 18:31:53 -07:00
parent 1bce5827c9
commit 5b49f09baa
15 changed files with 2696 additions and 2921 deletions

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -53,267 +53,266 @@ either expressed or implied, of The Regents of the University of California.
struct mqnic_dev;
struct mqnic_board_ops {
int (*init)(struct mqnic_dev *mqnic);
void (*deinit)(struct mqnic_dev *mqnic);
int (*init)(struct mqnic_dev *mqnic);
void (*deinit)(struct mqnic_dev *mqnic);
};
struct mqnic_i2c_bus
{
struct mqnic_dev *mqnic;
struct mqnic_i2c_bus {
struct mqnic_dev *mqnic;
u8 __iomem *scl_in_reg;
u8 __iomem *scl_out_reg;
u8 __iomem *sda_in_reg;
u8 __iomem *sda_out_reg;
u8 __iomem *scl_in_reg;
u8 __iomem *scl_out_reg;
u8 __iomem *sda_in_reg;
u8 __iomem *sda_out_reg;
uint32_t scl_in_mask;
uint32_t scl_out_mask;
uint32_t sda_in_mask;
uint32_t sda_out_mask;
uint32_t scl_in_mask;
uint32_t scl_out_mask;
uint32_t sda_in_mask;
uint32_t sda_out_mask;
struct list_head head;
struct list_head head;
struct i2c_algo_bit_data algo;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
struct i2c_adapter adapter;
};
struct mqnic_dev {
struct device *dev;
struct pci_dev *pdev;
struct device *dev;
struct pci_dev *pdev;
resource_size_t hw_regs_size;
phys_addr_t hw_regs_phys;
u8 __iomem *hw_addr;
u8 __iomem *phc_hw_addr;
resource_size_t hw_regs_size;
phys_addr_t hw_regs_phys;
u8 __iomem *hw_addr;
u8 __iomem *phc_hw_addr;
resource_size_t app_hw_regs_size;
phys_addr_t app_hw_regs_phys;
u8 __iomem *app_hw_addr;
resource_size_t app_hw_regs_size;
phys_addr_t app_hw_regs_phys;
u8 __iomem *app_hw_addr;
resource_size_t ram_hw_regs_size;
phys_addr_t ram_hw_regs_phys;
u8 __iomem *ram_hw_addr;
resource_size_t ram_hw_regs_size;
phys_addr_t ram_hw_regs_phys;
u8 __iomem *ram_hw_addr;
struct mutex state_lock;
struct mutex state_lock;
int mac_count;
u8 mac_list[MQNIC_MAX_IF][ETH_ALEN];
int mac_count;
u8 mac_list[MQNIC_MAX_IF][ETH_ALEN];
char name[16];
char name[16];
int irq_count;
int irq_map[32];
int irq_count;
int irq_map[32];
unsigned int id;
struct list_head dev_list_node;
unsigned int id;
struct list_head dev_list_node;
struct miscdevice misc_dev;
struct miscdevice misc_dev;
u32 fw_id;
u32 fw_ver;
u32 board_id;
u32 board_ver;
u32 fw_id;
u32 fw_ver;
u32 board_id;
u32 board_ver;
u32 phc_count;
u32 phc_offset;
u32 phc_count;
u32 phc_offset;
u32 if_count;
u32 if_stride;
u32 if_csr_offset;
u32 if_count;
u32 if_stride;
u32 if_csr_offset;
struct net_device *ndev[MQNIC_MAX_IF];
struct net_device *ndev[MQNIC_MAX_IF];
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct mqnic_board_ops *board_ops;
struct mqnic_board_ops *board_ops;
struct list_head i2c_bus;
int i2c_adapter_count;
struct list_head i2c_bus;
int i2c_adapter_count;
int mod_i2c_client_count;
struct i2c_client *mod_i2c_client[MQNIC_MAX_IF];
struct i2c_client *eeprom_i2c_client;
int mod_i2c_client_count;
struct i2c_client *mod_i2c_client[MQNIC_MAX_IF];
struct i2c_client *eeprom_i2c_client;
};
struct mqnic_frag {
dma_addr_t dma_addr;
u32 len;
dma_addr_t dma_addr;
u32 len;
};
struct mqnic_tx_info {
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(dma_addr);
DEFINE_DMA_UNMAP_LEN(len);
u32 frag_count;
struct mqnic_frag frags[MQNIC_MAX_FRAGS-1];
int ts_requested;
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(dma_addr);
DEFINE_DMA_UNMAP_LEN(len);
u32 frag_count;
struct mqnic_frag frags[MQNIC_MAX_FRAGS - 1];
int ts_requested;
};
struct mqnic_rx_info {
struct page *page;
u32 page_order;
u32 page_offset;
dma_addr_t dma_addr;
u32 len;
struct page *page;
u32 page_order;
u32 page_offset;
dma_addr_t dma_addr;
u32 len;
};
struct mqnic_ring {
// written on enqueue (i.e. start_xmit)
u32 head_ptr;
u64 bytes;
u64 packets;
u64 dropped_packets;
struct netdev_queue *tx_queue;
// written on enqueue (i.e. start_xmit)
u32 head_ptr;
u64 bytes;
u64 packets;
u64 dropped_packets;
struct netdev_queue *tx_queue;
// written from completion
u32 tail_ptr ____cacheline_aligned_in_smp;
u32 clean_tail_ptr;
u64 ts_s;
u8 ts_valid;
// written from completion
u32 tail_ptr ____cacheline_aligned_in_smp;
u32 clean_tail_ptr;
u64 ts_s;
u8 ts_valid;
// mostly constant
u32 size;
u32 full_size;
u32 size_mask;
u32 stride;
// mostly constant
u32 size;
u32 full_size;
u32 size_mask;
u32 stride;
u32 cpl_index;
u32 cpl_index;
u32 mtu;
u32 page_order;
u32 mtu;
u32 page_order;
u32 desc_block_size;
u32 log_desc_block_size;
u32 desc_block_size;
u32 log_desc_block_size;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
union {
struct mqnic_tx_info *tx_info;
struct mqnic_rx_info *rx_info;
};
union {
struct mqnic_tx_info *tx_info;
struct mqnic_rx_info *rx_info;
};
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
} ____cacheline_aligned_in_smp;
struct mqnic_cq_ring {
u32 head_ptr;
u32 head_ptr;
u32 tail_ptr;
u32 tail_ptr;
u32 size;
u32 size_mask;
u32 stride;
u32 size;
u32 size_mask;
u32 stride;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
struct net_device *ndev;
struct napi_struct napi;
int ring_index;
int eq_index;
struct net_device *ndev;
struct napi_struct napi;
int ring_index;
int eq_index;
void (*handler) (struct mqnic_cq_ring *);
void (*handler)(struct mqnic_cq_ring *);
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
};
struct mqnic_eq_ring {
u32 head_ptr;
u32 head_ptr;
u32 tail_ptr;
u32 tail_ptr;
u32 size;
u32 size_mask;
u32 stride;
u32 size;
u32 size_mask;
u32 stride;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
struct net_device *ndev;
int int_index;
struct net_device *ndev;
int int_index;
int irq;
int irq;
void (*handler) (struct mqnic_eq_ring *);
void (*handler)(struct mqnic_eq_ring *);
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
};
struct mqnic_port {
struct device *dev;
struct net_device *ndev;
struct device *dev;
struct net_device *ndev;
int index;
int index;
u32 tx_queue_count;
u32 tx_queue_count;
u32 port_id;
u32 port_features;
u32 port_mtu;
u32 sched_count;
u32 sched_offset;
u32 sched_stride;
u32 sched_type;
u32 port_id;
u32 port_features;
u32 port_mtu;
u32 sched_count;
u32 sched_offset;
u32 sched_stride;
u32 sched_type;
u8 __iomem *hw_addr;
u8 __iomem *hw_addr;
};
struct mqnic_priv {
struct device *dev;
struct net_device *ndev;
struct mqnic_dev *mdev;
struct device *dev;
struct net_device *ndev;
struct mqnic_dev *mdev;
spinlock_t stats_lock;
spinlock_t stats_lock;
bool registered;
int port;
bool port_up;
bool registered;
int port;
bool port_up;
u32 if_id;
u32 if_features;
u32 event_queue_count;
u32 event_queue_offset;
u32 tx_queue_count;
u32 tx_queue_offset;
u32 tx_cpl_queue_count;
u32 tx_cpl_queue_offset;
u32 rx_queue_count;
u32 rx_queue_offset;
u32 rx_cpl_queue_count;
u32 rx_cpl_queue_offset;
u32 port_count;
u32 port_offset;
u32 port_stride;
u32 if_id;
u32 if_features;
u32 event_queue_count;
u32 event_queue_offset;
u32 tx_queue_count;
u32 tx_queue_offset;
u32 tx_cpl_queue_count;
u32 tx_cpl_queue_offset;
u32 rx_queue_count;
u32 rx_queue_offset;
u32 rx_cpl_queue_count;
u32 rx_cpl_queue_offset;
u32 port_count;
u32 port_offset;
u32 port_stride;
u32 max_desc_block_size;
u32 max_desc_block_size;
u8 __iomem *hw_addr;
u8 __iomem *csr_hw_addr;
u8 __iomem *hw_addr;
u8 __iomem *csr_hw_addr;
struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS];
struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS];
struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS];
struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS];
struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];
struct mqnic_port *ports[MQNIC_MAX_PORTS];
struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS];
struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS];
struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS];
struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS];
struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];
struct mqnic_port *ports[MQNIC_MAX_PORTS];
struct hwtstamp_config hwts_config;
struct hwtstamp_config hwts_config;
struct i2c_client *mod_i2c_client;
struct i2c_client *mod_i2c_client;
};
// mqnic_main.c
@@ -327,7 +326,8 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr);
void mqnic_destroy_netdev(struct net_device *ndev);
// mqnic_port.c
int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, int index, u8 __iomem *hw_addr);
int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
int index, u8 __iomem *hw_addr);
void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr);
int mqnic_activate_port(struct mqnic_port *port);
void mqnic_deactivate_port(struct mqnic_port *port);
@@ -341,7 +341,8 @@ void mqnic_port_set_rx_mtu(struct mqnic_port *port, u32 mtu);
// mqnic_ptp.c
void mqnic_register_phc(struct mqnic_dev *mdev);
void mqnic_unregister_phc(struct mqnic_dev *mdev);
ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring, const struct mqnic_cpl *cpl);
ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
const struct mqnic_cpl *cpl);
// mqnic_i2c.c
struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *reg);
@@ -356,9 +357,11 @@ int mqnic_board_init(struct mqnic_dev *mqnic);
void mqnic_board_deinit(struct mqnic_dev *mqnic);
// mqnic_eq.c
int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr);
int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index);
int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
int int_index);
void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring);
bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring);
bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring);
@@ -368,9 +371,11 @@ void mqnic_arm_eq(struct mqnic_eq_ring *ring);
void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring);
// mqnic_cq.c
int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr);
int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int eq_index);
int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
int eq_index);
void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring);
bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring);
bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring);
@@ -379,35 +384,44 @@ void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring);
void mqnic_arm_cq(struct mqnic_cq_ring *ring);
// mqnic_tx.c
int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
int cpl_index);
void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring);
bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring);
void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring);
void mqnic_tx_write_head_ptr(struct mqnic_ring *ring);
void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index, int napi_budget);
void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
int index, int napi_budget);
int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
int napi_budget);
void mqnic_tx_irq(struct mqnic_cq_ring *cq);
int mqnic_poll_tx_cq(struct napi_struct *napi, int budget);
netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);
// mqnic_rx.c
int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
int cpl_index);
void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring);
bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring);
void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring);
void mqnic_rx_write_head_ptr(struct mqnic_ring *ring);
void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
int index);
int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
int index);
void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring);
int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
int napi_budget);
void mqnic_rx_irq(struct mqnic_cq_ring *cq);
int mqnic_poll_rx_cq(struct napi_struct *napi, int budget);

File diff suppressed because it is too large

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,126 +33,127 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"
int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_cq_ring *ring;
int ret;
struct device *dev = priv->dev;
struct mqnic_cq_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate CQ ring");
return -ENOMEM;
}
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
dev_err(dev, "Failed to allocate CQ ring");
return -ENOMEM;
}
ring->ndev = priv->ndev;
ring->ndev = priv->ndev;
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size - 1;
ring->stride = roundup_pow_of_two(stride);
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate CQ ring DMA buffer");
ret = -ENOMEM;
goto fail_ring;
}
ring->buf_size = ring->size * ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size,
&ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf) {
dev_err(dev, "Failed to allocate CQ ring DMA buffer");
ret = -ENOMEM;
goto fail_ring;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG;
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->head_ptr = 0;
ring->tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
*ring_ptr = ring;
return 0;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
kfree(ring);
*ring_ptr = NULL;
return ret;
}
void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_cq_ring *ring = *ring_ptr;
*ring_ptr = NULL;
struct device *dev = priv->dev;
struct mqnic_cq_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_cq_ring(priv, ring);
mqnic_deactivate_cq_ring(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kfree(ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kfree(ring);
}
int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int eq_index)
{
ring->eq_index = eq_index;
ring->eq_index = eq_index;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(eq_index, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(eq_index, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK,
ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
return 0;
return 0;
}
void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(ring->eq_index, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(ring->eq_index, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
}
bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring)
{
return ring->head_ptr == ring->tail_ptr;
return ring->head_ptr == ring->tail_ptr;
}
bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring)
{
return ring->head_ptr - ring->tail_ptr >= ring->size;
return ring->head_ptr - ring->tail_ptr >= ring->size;
}
void mqnic_cq_read_head_ptr(struct mqnic_cq_ring *ring)
{
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
}
void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring)
{
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
}
void mqnic_arm_cq(struct mqnic_cq_ring *ring)
{
iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK,
ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
}
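An aside on the pointer handling above (an illustration, not part of the diff): the hardware exposes only the low 16 bits of each ring pointer (hw_ptr_mask is 0xffff), and mqnic_cq_read_head_ptr() folds that truncated value back into the driver's full-width software pointer. A minimal sketch of the same arithmetic, with a hypothetical helper name:

/* Illustrative only: extend a truncated hardware ring pointer (low bits in
 * hw_val, width set by hw_ptr_mask) into the monotonically increasing 32-bit
 * software pointer sw_ptr; the masked difference handles wraparound. */
static inline u32 mqnic_extend_ptr(u32 sw_ptr, u32 hw_val, u32 hw_ptr_mask)
{
	return sw_ptr + ((hw_val - sw_ptr) & hw_ptr_mask);
}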

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,100 +38,89 @@ either expressed or implied, of The Regents of the University of California.
static int mqnic_open(struct inode *inode, struct file *file)
{
// struct miscdevice *miscdev = file->private_data;
// struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
// struct miscdevice *miscdev = file->private_data;
// struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
return 0;
return 0;
}
static int mqnic_release(struct inode *inode, struct file *file)
{
// struct miscdevice *miscdev = file->private_data;
// struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
// struct miscdevice *miscdev = file->private_data;
// struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
return 0;
return 0;
}
static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *vma)
{
size_t map_size = vma->vm_end - vma->vm_start;
int ret;
size_t map_size = vma->vm_end - vma->vm_start;
int ret;
if (map_size > mqnic->hw_regs_size)
{
dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)", vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
return -EINVAL;
}
if (map_size > mqnic->hw_regs_size) {
dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)",
vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
return -EINVAL;
}
ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT, map_size, pgprot_noncached(vma->vm_page_prot));
ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT,
map_size, pgprot_noncached(vma->vm_page_prot));
if (ret)
{
dev_err(mqnic->dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
}
else
{
dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p", &mqnic->hw_regs_phys, (void *)vma->vm_start);
}
if (ret)
dev_err(mqnic->dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
else
dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p",
&mqnic->hw_regs_phys, (void *)vma->vm_start);
return ret;
return ret;
}
static int mqnic_mmap(struct file *file, struct vm_area_struct *vma)
{
struct miscdevice *miscdev = file->private_data;
struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
int ret;
struct miscdevice *miscdev = file->private_data;
struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
if (vma->vm_pgoff == 0)
{
ret = mqnic_map_registers(mqnic, vma);
}
else
{
goto fail_invalid_offset;
}
if (vma->vm_pgoff == 0)
return mqnic_map_registers(mqnic, vma);
return ret;
fail_invalid_offset:
dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu", vma->vm_pgoff);
return -EINVAL;
dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu",
vma->vm_pgoff);
return -EINVAL;
}
static long mqnic_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct miscdevice *miscdev = file->private_data;
struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
struct miscdevice *miscdev = file->private_data;
struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
if (_IOC_TYPE(cmd) != MQNIC_IOCTL_TYPE)
return -ENOTTY;
if (_IOC_TYPE(cmd) != MQNIC_IOCTL_TYPE)
return -ENOTTY;
switch (cmd) {
case MQNIC_IOCTL_INFO:
{
struct mqnic_ioctl_info ctl;
switch (cmd) {
case MQNIC_IOCTL_INFO:
{
struct mqnic_ioctl_info ctl;
ctl.fw_id = mqnic->fw_id;
ctl.fw_ver = mqnic->fw_ver;
ctl.board_id = mqnic->board_id;
ctl.board_ver = mqnic->board_ver;
ctl.regs_size = mqnic->hw_regs_size;
ctl.fw_id = mqnic->fw_id;
ctl.fw_ver = mqnic->fw_ver;
ctl.board_id = mqnic->board_id;
ctl.board_ver = mqnic->board_ver;
ctl.regs_size = mqnic->hw_regs_size;
if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl)) != 0)
return -EFAULT;
if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl)) != 0)
return -EFAULT;
return 0;
}
default:
return -ENOTTY;
}
return 0;
}
default:
return -ENOTTY;
}
}
const struct file_operations mqnic_fops = {
.owner = THIS_MODULE,
.open = mqnic_open,
.release = mqnic_release,
.mmap = mqnic_mmap,
.unlocked_ioctl = mqnic_ioctl,
.owner = THIS_MODULE,
.open = mqnic_open,
.release = mqnic_release,
.mmap = mqnic_mmap,
.unlocked_ioctl = mqnic_ioctl,
};
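For reference, a minimal userspace sketch of how the character device above can be exercised: it queries MQNIC_IOCTL_INFO (defined in mqnic_ioctl.h, also touched by this commit) and maps the register region that mqnic_mmap() exposes at page offset 0. The device node path /dev/mqnic0 is an assumption based on the misc device name; error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "mqnic_ioctl.h"

int main(void)
{
	struct mqnic_ioctl_info info;
	void *regs;
	int fd;

	fd = open("/dev/mqnic0", O_RDWR); /* assumed device node path */
	if (fd < 0)
		return 1;

	/* driver fills fw/board IDs and the register region size */
	if (ioctl(fd, MQNIC_IOCTL_INFO, &info) != 0) {
		close(fd);
		return 1;
	}

	printf("fw 0x%08x board 0x%08x regs %zu bytes\n",
	       info.fw_id, info.board_id, info.regs_size);

	/* page offset 0 selects the register region in mqnic_mmap() */
	regs = mmap(NULL, info.regs_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	if (regs != MAP_FAILED)
		munmap(regs, info.regs_size);

	close(fd);
	return 0;
}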

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,200 +33,199 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"
int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_eq_ring *ring;
int ret;
struct device *dev = priv->dev;
struct mqnic_eq_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate EQ ring");
return -ENOMEM;
}
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
dev_err(dev, "Failed to allocate EQ ring");
return -ENOMEM;
}
ring->ndev = priv->ndev;
ring->ndev = priv->ndev;
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size - 1;
ring->stride = roundup_pow_of_two(stride);
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate EQ ring DMA buffer");
ret = -ENOMEM;
goto fail_ring;
}
ring->buf_size = ring->size * ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size,
&ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf) {
dev_err(dev, "Failed to allocate EQ ring DMA buffer");
ret = -ENOMEM;
goto fail_ring;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG;
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->head_ptr = 0;
ring->tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask,
ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
*ring_ptr = ring;
return 0;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
kfree(ring);
*ring_ptr = NULL;
return ret;
}
void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_eq_ring *ring = *ring_ptr;
*ring_ptr = NULL;
struct device *dev = priv->dev;
struct mqnic_eq_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_eq_ring(priv, ring);
mqnic_deactivate_eq_ring(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kfree(ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kfree(ring);
}
int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index)
int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
int int_index)
{
ring->int_index = int_index;
ring->int_index = int_index;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(int_index, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
// set interrupt index
iowrite32(int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask,
ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK,
ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
return 0;
return 0;
}
void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(ring->int_index, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(ring->int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
}
bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring)
{
return ring->head_ptr == ring->tail_ptr;
return ring->head_ptr == ring->tail_ptr;
}
bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring)
{
return ring->head_ptr - ring->tail_ptr >= ring->size;
return ring->head_ptr - ring->tail_ptr >= ring->size;
}
void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring)
{
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
}
void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring)
{
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
}
void mqnic_arm_eq(struct mqnic_eq_ring *ring)
{
iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK,
ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
}
void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_event *event;
u32 eq_index;
u32 eq_tail_ptr;
int done = 0;
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_event *event;
u32 eq_index;
u32 eq_tail_ptr;
int done = 0;
if (unlikely(!priv->port_up))
{
return;
}
if (unlikely(!priv->port_up))
return;
// read head pointer from NIC
mqnic_eq_read_head_ptr(eq_ring);
// read head pointer from NIC
mqnic_eq_read_head_ptr(eq_ring);
eq_tail_ptr = eq_ring->tail_ptr;
eq_index = eq_tail_ptr & eq_ring->size_mask;
eq_tail_ptr = eq_ring->tail_ptr;
eq_index = eq_tail_ptr & eq_ring->size_mask;
while (eq_ring->head_ptr != eq_tail_ptr)
{
event = (struct mqnic_event *)(eq_ring->buf + eq_index*eq_ring->stride);
while (eq_ring->head_ptr != eq_tail_ptr) {
event = (struct mqnic_event *)(eq_ring->buf + eq_index * eq_ring->stride);
if (event->type == MQNIC_EVENT_TYPE_TX_CPL)
{
// transmit completion event
if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count))
{
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
}
else
{
struct mqnic_cq_ring *cq_ring = priv->tx_cpl_ring[le16_to_cpu(event->source)];
if (likely(cq_ring && cq_ring->handler))
{
cq_ring->handler(cq_ring);
}
}
}
else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL)
{
// receive completion event
if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count))
{
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
}
else
{
struct mqnic_cq_ring *cq_ring = priv->rx_cpl_ring[le16_to_cpu(event->source)];
if (likely(cq_ring && cq_ring->handler))
{
cq_ring->handler(cq_ring);
}
}
}
else
{
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)", priv->port, le16_to_cpu(event->type), eq_index, le16_to_cpu(event->source));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
}
if (event->type == MQNIC_EVENT_TYPE_TX_CPL) {
// transmit completion event
if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) {
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
priv->port, le16_to_cpu(event->source), eq_index,
le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true);
} else {
struct mqnic_cq_ring *cq_ring =
priv->tx_cpl_ring[le16_to_cpu(event->source)];
if (likely(cq_ring && cq_ring->handler))
cq_ring->handler(cq_ring);
}
} else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) {
// receive completion event
if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) {
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
priv->port, le16_to_cpu(event->source), eq_index,
le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true);
} else {
struct mqnic_cq_ring *cq_ring =
priv->rx_cpl_ring[le16_to_cpu(event->source)];
if (likely(cq_ring && cq_ring->handler))
cq_ring->handler(cq_ring);
}
} else {
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)",
priv->port, le16_to_cpu(event->type), eq_index,
le16_to_cpu(event->source));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true);
}
done++;
done++;
eq_tail_ptr++;
eq_index = eq_tail_ptr & eq_ring->size_mask;
}
eq_tail_ptr++;
eq_index = eq_tail_ptr & eq_ring->size_mask;
}
// update eq tail
eq_ring->tail_ptr = eq_tail_ptr;
mqnic_eq_write_tail_ptr(eq_ring);
// update eq tail
eq_ring->tail_ptr = eq_tail_ptr;
mqnic_eq_write_tail_ptr(eq_ring);
}

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -40,143 +40,136 @@ either expressed or implied, of The Regents of the University of California.
#define SFF_MODULE_ID_QSFP_PLUS 0x0d
#define SFF_MODULE_ID_QSFP28 0x11
static void mqnic_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo)
static void mqnic_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRIVER_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRIVER_VERSION, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d", mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
strlcpy(drvinfo->bus_info, dev_name(mdev->dev), sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d",
mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
strlcpy(drvinfo->bus_info, dev_name(mdev->dev), sizeof(drvinfo->bus_info));
}
static int mqnic_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
static int mqnic_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
ethtool_op_get_ts_info(ndev, info);
ethtool_op_get_ts_info(ndev, info);
if (mdev->ptp_clock)
info->phc_index = ptp_clock_index(mdev->ptp_clock);
if (mdev->ptp_clock)
info->phc_index = ptp_clock_index(mdev->ptp_clock);
if (!(priv->if_features & MQNIC_IF_FEATURE_PTP_TS) || !mdev->ptp_clock)
return 0;
if (!(priv->if_features & MQNIC_IF_FEATURE_PTP_TS) || !mdev->ptp_clock)
return 0;
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types =
BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
info->rx_filters =
BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_ALL);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
return 0;
return 0;
}
static int mqnic_read_module_eeprom(struct net_device *ndev, u16 offset, u16 len, u8 *data)
static int mqnic_read_module_eeprom(struct net_device *ndev,
u16 offset, u16 len, u8 * data)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_priv *priv = netdev_priv(ndev);
if (!priv->mod_i2c_client)
{
return -1;
}
if (!priv->mod_i2c_client)
return -1;
if (len > I2C_SMBUS_BLOCK_MAX)
len = I2C_SMBUS_BLOCK_MAX;
if (len > I2C_SMBUS_BLOCK_MAX)
len = I2C_SMBUS_BLOCK_MAX;
return i2c_smbus_read_i2c_block_data(priv->mod_i2c_client, offset, len, data);
return i2c_smbus_read_i2c_block_data(priv->mod_i2c_client, offset, len, data);
}
static int mqnic_get_module_info(struct net_device *ndev, struct ethtool_modinfo *modinfo)
static int mqnic_get_module_info(struct net_device *ndev,
struct ethtool_modinfo *modinfo)
{
struct mqnic_priv *priv = netdev_priv(ndev);
int read_len = 0;
u8 data[16];
struct mqnic_priv *priv = netdev_priv(ndev);
int read_len = 0;
u8 data[16];
// read module ID and revision
read_len = mqnic_read_module_eeprom(ndev, 0, 2, data);
// read module ID and revision
read_len = mqnic_read_module_eeprom(ndev, 0, 2, data);
if (read_len < 2)
return -EIO;
if (read_len < 2)
return -EIO;
// check identifier byte at address 0
switch (data[0]) {
case SFF_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
case SFF_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
case SFF_MODULE_ID_QSFP_PLUS:
// check revision at address 1
if (data[1] >= 0x03)
{
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
}
else
{
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
}
break;
case SFF_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
break;
default:
dev_err(priv->dev, "Unknown module ID");
return -EINVAL;
}
// check identifier byte at address 0
switch (data[0]) {
case SFF_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
case SFF_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
case SFF_MODULE_ID_QSFP_PLUS:
// check revision at address 1
if (data[1] >= 0x03) {
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
}
break;
case SFF_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
break;
default:
dev_err(priv->dev, "Unknown module ID");
return -EINVAL;
}
return 0;
return 0;
}
static int mqnic_get_module_eeprom(struct net_device *ndev, struct ethtool_eeprom *eeprom, u8 *data)
static int mqnic_get_module_eeprom(struct net_device *ndev,
struct ethtool_eeprom *eeprom, u8 * data)
{
struct mqnic_priv *priv = netdev_priv(ndev);
int i = 0;
int read_len;
struct mqnic_priv *priv = netdev_priv(ndev);
int i = 0;
int read_len;
if (eeprom->len == 0)
return -EINVAL;
if (eeprom->len == 0)
return -EINVAL;
memset(data, 0, eeprom->len);
memset(data, 0, eeprom->len);
while (i < eeprom->len)
{
read_len = mqnic_read_module_eeprom(ndev, eeprom->offset+i, eeprom->len-i, data+i);
while (i < eeprom->len) {
read_len = mqnic_read_module_eeprom(ndev, eeprom->offset + i,
eeprom->len - i, data + i);
if (read_len == 0)
return -EIO;
if (read_len == 0)
return -EIO;
if (read_len < 0)
{
dev_err(priv->dev, "Failed to read module EEPROM");
return 0;
}
if (read_len < 0) {
dev_err(priv->dev, "Failed to read module EEPROM");
return 0;
}
i += read_len;
}
i += read_len;
}
return 0;
return 0;
}
const struct ethtool_ops mqnic_ethtool_ops = {
.get_drvinfo = mqnic_get_drvinfo,
.get_ts_info = mqnic_get_ts_info,
.get_module_info = mqnic_get_module_info,
.get_module_eeprom = mqnic_get_module_eeprom,
.get_drvinfo = mqnic_get_drvinfo,
.get_ts_info = mqnic_get_ts_info,
.get_module_info = mqnic_get_module_info,
.get_module_eeprom = mqnic_get_module_eeprom,
};
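For context, these callbacks map onto standard ethtool commands: .get_drvinfo backs ethtool -i, .get_ts_info reports timestamping capabilities and the PHC index used by ethtool -T, and .get_module_info/.get_module_eeprom serve the transceiver EEPROM dumps behind ethtool -m.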

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -260,32 +260,32 @@ either expressed or implied, of The Regents of the University of California.
#define MQNIC_EVENT_SIZE 32
struct mqnic_desc {
__le16 rsvd0;
__le16 tx_csum_cmd;
__le32 len;
__le64 addr;
__le16 rsvd0;
__le16 tx_csum_cmd;
__le32 len;
__le64 addr;
};
struct mqnic_cpl {
__le16 queue;
__le16 index;
__le16 len;
__le16 rsvd0;
__le32 ts_ns;
__le16 ts_s;
__le16 rx_csum;
__le32 rx_hash;
__u8 rx_hash_type;
__u8 rsvd1;
__u8 rsvd2;
__u8 rsvd3;
__le32 rsvd4;
__le32 rsvd5;
__le16 queue;
__le16 index;
__le16 len;
__le16 rsvd0;
__le32 ts_ns;
__le16 ts_s;
__le16 rx_csum;
__le32 rx_hash;
__u8 rx_hash_type;
__u8 rsvd1;
__u8 rsvd2;
__u8 rsvd3;
__le32 rsvd4;
__le32 rsvd5;
};
struct mqnic_event {
__le16 type;
__le16 source;
__le16 type;
__le16 source;
};
#endif /* MQNIC_HW_H */

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,154 +35,147 @@ either expressed or implied, of The Regents of the University of California.
static void mqnic_i2c_set_scl(void *data, int state)
{
struct mqnic_i2c_bus *bus = data;
struct mqnic_i2c_bus *bus = data;
if (state)
{
iowrite32(ioread32(bus->scl_out_reg) | bus->scl_out_mask, bus->scl_out_reg);
}
else
{
iowrite32(ioread32(bus->scl_out_reg) & ~bus->scl_out_mask, bus->scl_out_reg);
}
if (state)
iowrite32(ioread32(bus->scl_out_reg) | bus->scl_out_mask, bus->scl_out_reg);
else
iowrite32(ioread32(bus->scl_out_reg) & ~bus->scl_out_mask, bus->scl_out_reg);
}
static void mqnic_i2c_set_sda(void *data, int state)
{
struct mqnic_i2c_bus *bus = data;
struct mqnic_i2c_bus *bus = data;
if (state)
{
iowrite32(ioread32(bus->sda_out_reg) | bus->sda_out_mask, bus->sda_out_reg);
}
else
{
iowrite32(ioread32(bus->sda_out_reg) & ~bus->sda_out_mask, bus->sda_out_reg);
}
if (state)
iowrite32(ioread32(bus->sda_out_reg) | bus->sda_out_mask, bus->sda_out_reg);
else
iowrite32(ioread32(bus->sda_out_reg) & ~bus->sda_out_mask, bus->sda_out_reg);
}
static int mqnic_i2c_get_scl(void *data)
{
struct mqnic_i2c_bus *bus = data;
struct mqnic_i2c_bus *bus = data;
return !!(ioread32(bus->scl_in_reg) & bus->scl_in_mask);
return !!(ioread32(bus->scl_in_reg) & bus->scl_in_mask);
}
static int mqnic_i2c_get_sda(void *data)
{
struct mqnic_i2c_bus *bus = data;
struct mqnic_i2c_bus *bus = data;
return !!(ioread32(bus->sda_in_reg) & bus->sda_in_mask);
return !!(ioread32(bus->sda_in_reg) & bus->sda_in_mask);
}
struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *reg)
{
struct mqnic_i2c_bus *bus;
struct i2c_algo_bit_data *algo;
struct i2c_adapter *adapter;
struct mqnic_i2c_bus *bus;
struct i2c_algo_bit_data *algo;
struct i2c_adapter *adapter;
if (!reg)
return NULL;
if (!reg)
return NULL;
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
if (!bus)
return NULL;
if (!bus)
return NULL;
// set private data
bus->mqnic = mqnic;
bus->scl_in_reg = reg;
bus->scl_out_reg = reg;
bus->sda_in_reg = reg;
bus->sda_out_reg = reg;
bus->scl_in_mask = MQNIC_REG_GPIO_I2C_SCL_IN;
bus->scl_out_mask = MQNIC_REG_GPIO_I2C_SCL_OUT;
bus->sda_in_mask = MQNIC_REG_GPIO_I2C_SDA_IN;
bus->sda_out_mask = MQNIC_REG_GPIO_I2C_SDA_OUT;
// set private data
bus->mqnic = mqnic;
bus->scl_in_reg = reg;
bus->scl_out_reg = reg;
bus->sda_in_reg = reg;
bus->sda_out_reg = reg;
bus->scl_in_mask = MQNIC_REG_GPIO_I2C_SCL_IN;
bus->scl_out_mask = MQNIC_REG_GPIO_I2C_SCL_OUT;
bus->sda_in_mask = MQNIC_REG_GPIO_I2C_SDA_IN;
bus->sda_out_mask = MQNIC_REG_GPIO_I2C_SDA_OUT;
// bit-bang algorithm setup
algo = &bus->algo;
algo->udelay = 5;
algo->timeout = usecs_to_jiffies(2000);;
algo->setsda = mqnic_i2c_set_sda;
algo->setscl = mqnic_i2c_set_scl;
algo->getsda = mqnic_i2c_get_sda;
algo->getscl = mqnic_i2c_get_scl;
algo->data = bus;
// bit-bang algorithm setup
algo = &bus->algo;
algo->udelay = 5;
algo->timeout = usecs_to_jiffies(2000);;
algo->setsda = mqnic_i2c_set_sda;
algo->setscl = mqnic_i2c_set_scl;
algo->getsda = mqnic_i2c_get_sda;
algo->getscl = mqnic_i2c_get_scl;
algo->data = bus;
// adapter setup
adapter = &bus->adapter;
adapter->owner = THIS_MODULE;
adapter->algo_data = algo;
adapter->dev.parent = mqnic->dev;
snprintf(adapter->name, sizeof(adapter->name), "%s I2C%d", mqnic->name, mqnic->i2c_adapter_count);
// adapter setup
adapter = &bus->adapter;
adapter->owner = THIS_MODULE;
adapter->algo_data = algo;
adapter->dev.parent = mqnic->dev;
snprintf(adapter->name, sizeof(adapter->name), "%s I2C%d", mqnic->name,
mqnic->i2c_adapter_count);
if (i2c_bit_add_bus(adapter))
{
dev_err(mqnic->dev, "Failed to register I2C adapter");
goto err_free_bus;
}
if (i2c_bit_add_bus(adapter)) {
dev_err(mqnic->dev, "Failed to register I2C adapter");
goto err_free_bus;
}
list_add_tail(&bus->head, &mqnic->i2c_bus);
list_add_tail(&bus->head, &mqnic->i2c_bus);
mqnic->i2c_adapter_count++;
mqnic->i2c_adapter_count++;
return bus;
return bus;
err_free_bus:
kfree(bus);
return NULL;
kfree(bus);
return NULL;
}
struct i2c_adapter *mqnic_i2c_adapter_create(struct mqnic_dev *mqnic, u8 __iomem *reg)
{
struct mqnic_i2c_bus *bus = mqnic_i2c_bus_create(mqnic, reg);
struct mqnic_i2c_bus *bus = mqnic_i2c_bus_create(mqnic, reg);
if (!bus)
return NULL;
if (!bus)
return NULL;
return &bus->adapter;
return &bus->adapter;
}
void mqnic_i2c_bus_release(struct mqnic_i2c_bus *bus)
{
struct mqnic_dev *mqnic;
struct mqnic_dev *mqnic;
if (!bus)
return;
if (!bus)
return;
mqnic = bus->mqnic;
mqnic = bus->mqnic;
mqnic->i2c_adapter_count--;
mqnic->i2c_adapter_count--;
i2c_del_adapter(&bus->adapter);
list_del(&bus->head);
kfree(bus);
i2c_del_adapter(&bus->adapter);
list_del(&bus->head);
kfree(bus);
}
void mqnic_i2c_adapter_release(struct i2c_adapter *adapter)
{
struct mqnic_i2c_bus *bus;
struct mqnic_i2c_bus *bus;
if (!adapter)
return;
if (!adapter)
return;
bus = container_of(adapter, struct mqnic_i2c_bus, adapter);
mqnic_i2c_bus_release(bus);
bus = container_of(adapter, struct mqnic_i2c_bus, adapter);
mqnic_i2c_bus_release(bus);
}
int mqnic_i2c_init(struct mqnic_dev *mqnic)
{
INIT_LIST_HEAD(&mqnic->i2c_bus);
INIT_LIST_HEAD(&mqnic->i2c_bus);
return 0;
return 0;
}
void mqnic_i2c_deinit(struct mqnic_dev *mqnic)
{
while (!list_empty(&mqnic->i2c_bus))
{
struct mqnic_i2c_bus *bus = list_first_entry(&mqnic->i2c_bus, typeof(*bus), head);
mqnic_i2c_bus_release(bus);
}
struct mqnic_i2c_bus *bus;
while (!list_empty(&mqnic->i2c_bus)) {
bus = list_first_entry(&mqnic->i2c_bus, typeof(*bus), head);
mqnic_i2c_bus_release(bus);
}
}
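For context, the bus implemented above is a GPIO bit-banged I2C master built on the kernel's i2c-algo-bit layer: the driver only supplies the SCL/SDA get/set callbacks over its register space plus the timing parameters, and i2c_bit_add_bus() attaches the generic bit-banging algorithm that drives the actual I2C protocol.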

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,11 +41,11 @@ either expressed or implied, of The Regents of the University of California.
#define MQNIC_IOCTL_INFO _IOR(MQNIC_IOCTL_TYPE, 0xf0, struct mqnic_ioctl_info)
struct mqnic_ioctl_info {
__u32 fw_id;
__u32 fw_ver;
__u32 board_id;
__u32 board_ver;
size_t regs_size;
__u32 fw_id;
__u32 fw_ver;
__u32 board_id;
__u32 board_ver;
size_t regs_size;
};
#endif /* MQNIC_IOCTL_H */

@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,7 @@ either expressed or implied, of The Regents of the University of California.
#include <linux/version.h>
#include <linux/delay.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
#include <linux/pci-aspm.h>
#endif
@@ -46,9 +46,9 @@ MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(DRIVER_VERSION);
static const struct pci_device_id mqnic_pci_id_table[] = {
{ PCI_DEVICE(0x1234, 0x1001) },
{ PCI_DEVICE(0x5543, 0x1001) },
{ 0 /* end */ }
{PCI_DEVICE(0x1234, 0x1001)},
{PCI_DEVICE(0x5543, 0x1001)},
{0 /* end */ }
};
MODULE_DEVICE_TABLE(pci, mqnic_pci_id_table);
@@ -58,416 +58,388 @@ static DEFINE_SPINLOCK(mqnic_devices_lock);
static unsigned int mqnic_get_free_id(void)
{
struct mqnic_dev *mqnic;
unsigned int id = 0;
bool available = false;
struct mqnic_dev *mqnic;
unsigned int id = 0;
bool available = false;
while (!available)
{
available = true;
list_for_each_entry(mqnic, &mqnic_devices, dev_list_node)
{
if (mqnic->id == id)
{
available = false;
id++;
break;
}
}
}
while (!available) {
available = true;
list_for_each_entry(mqnic, &mqnic_devices, dev_list_node) {
if (mqnic->id == id) {
available = false;
id++;
break;
}
}
}
return id;
return id;
}
static irqreturn_t mqnic_interrupt(int irq, void *data)
{
struct mqnic_dev *mqnic = data;
struct mqnic_priv *priv;
struct mqnic_dev *mqnic = data;
struct mqnic_priv *priv;
int k, l;
int k, l;
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
{
if (unlikely(!mqnic->ndev[k]))
continue;
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++) {
if (unlikely(!mqnic->ndev[k]))
continue;
priv = netdev_priv(mqnic->ndev[k]);
priv = netdev_priv(mqnic->ndev[k]);
if (unlikely(!priv->port_up))
continue;
if (unlikely(!priv->port_up))
continue;
for (l = 0; l < priv->event_queue_count; l++)
{
if (unlikely(!priv->event_ring[l]))
continue;
for (l = 0; l < priv->event_queue_count; l++) {
if (unlikely(!priv->event_ring[l]))
continue;
if (priv->event_ring[l]->irq == irq)
{
mqnic_process_eq(priv->ndev, priv->event_ring[l]);
mqnic_arm_eq(priv->event_ring[l]);
}
}
}
if (priv->event_ring[l]->irq == irq) {
mqnic_process_eq(priv->ndev, priv->event_ring[l]);
mqnic_arm_eq(priv->event_ring[l]);
}
}
}
return IRQ_HANDLED;
return IRQ_HANDLED;
}
static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0;
struct mqnic_dev *mqnic;
struct device *dev = &pdev->dev;
int ret = 0;
struct mqnic_dev *mqnic;
struct device *dev = &pdev->dev;
int k = 0;
int k = 0;
dev_info(dev, DRIVER_NAME " PCI probe");
dev_info(dev, " Vendor: 0x%04x", pdev->vendor);
dev_info(dev, " Device: 0x%04x", pdev->device);
dev_info(dev, " Class: 0x%06x", pdev->class);
dev_info(dev, " PCI ID: %04x:%02x:%02x.%d", pci_domain_nr(pdev->bus),
pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
if (pdev->pcie_cap) {
u16 devctl;
u32 lnkcap;
u16 lnksta;
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &lnkcap);
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);
dev_info(dev, " Max payload size: %d bytes", 128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5));
dev_info(dev, " Max read request size: %d bytes", 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
dev_info(dev, " Link capability: gen %d x%d", lnkcap & PCI_EXP_LNKCAP_SLS, (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
dev_info(dev, " Link status: gen %d x%d", lnksta & PCI_EXP_LNKSTA_CLS, (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
dev_info(dev, " Relaxed ordering: %s", devctl & PCI_EXP_DEVCTL_RELAX_EN ? "enabled" : "disabled");
dev_info(dev, " Phantom functions: %s", devctl & PCI_EXP_DEVCTL_PHANTOM ? "enabled" : "disabled");
dev_info(dev, " Extended tags: %s", devctl & PCI_EXP_DEVCTL_EXT_TAG ? "enabled" : "disabled");
dev_info(dev, " No snoop: %s", devctl & PCI_EXP_DEVCTL_NOSNOOP_EN ? "enabled" : "disabled");
}
dev_info(dev, DRIVER_NAME " PCI probe");
dev_info(dev, " Vendor: 0x%04x", pdev->vendor);
dev_info(dev, " Device: 0x%04x", pdev->device);
dev_info(dev, " Class: 0x%06x", pdev->class);
dev_info(dev, " PCI ID: %04x:%02x:%02x.%d", pci_domain_nr(pdev->bus),
pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
if (pdev->pcie_cap) {
u16 devctl;
u32 lnkcap;
u16 lnksta;
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &lnkcap);
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);
dev_info(dev, " Max payload size: %d bytes",
128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5));
dev_info(dev, " Max read request size: %d bytes",
128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
dev_info(dev, " Link capability: gen %d x%d",
lnkcap & PCI_EXP_LNKCAP_SLS, (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
dev_info(dev, " Link status: gen %d x%d",
lnksta & PCI_EXP_LNKSTA_CLS, (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
dev_info(dev, " Relaxed ordering: %s",
devctl & PCI_EXP_DEVCTL_RELAX_EN ? "enabled" : "disabled");
dev_info(dev, " Phantom functions: %s",
devctl & PCI_EXP_DEVCTL_PHANTOM ? "enabled" : "disabled");
dev_info(dev, " Extended tags: %s",
devctl & PCI_EXP_DEVCTL_EXT_TAG ? "enabled" : "disabled");
dev_info(dev, " No snoop: %s",
devctl & PCI_EXP_DEVCTL_NOSNOOP_EN ? "enabled" : "disabled");
}
#ifdef CONFIG_NUMA
dev_info(dev, " NUMA node: %d", pdev->dev.numa_node);
dev_info(dev, " NUMA node: %d", pdev->dev.numa_node);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0)
pcie_print_link_status(pdev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
pcie_print_link_status(pdev);
#endif
if (!(mqnic = devm_kzalloc(dev, sizeof(*mqnic), GFP_KERNEL)))
{
return -ENOMEM;
}
mqnic = devm_kzalloc(dev, sizeof(*mqnic), GFP_KERNEL);
if (!mqnic) {
dev_err(dev, "Failed to allocate memory");
return -ENOMEM;
}
mqnic->dev = dev;
mqnic->pdev = pdev;
pci_set_drvdata(pdev, mqnic);
mqnic->dev = dev;
mqnic->pdev = pdev;
pci_set_drvdata(pdev, mqnic);
// assign ID and add to list
spin_lock(&mqnic_devices_lock);
mqnic->id = mqnic_get_free_id();
list_add_tail(&mqnic->dev_list_node, &mqnic_devices);
spin_unlock(&mqnic_devices_lock);
// assign ID and add to list
spin_lock(&mqnic_devices_lock);
mqnic->id = mqnic_get_free_id();
list_add_tail(&mqnic->dev_list_node, &mqnic_devices);
spin_unlock(&mqnic_devices_lock);
snprintf(mqnic->name, sizeof(mqnic->name), DRIVER_NAME "%d", mqnic->id);
snprintf(mqnic->name, sizeof(mqnic->name), DRIVER_NAME "%d", mqnic->id);
// Disable ASPM
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
// Disable ASPM
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
// Enable device
ret = pci_enable_device_mem(pdev);
if (ret)
{
dev_err(dev, "Failed to enable PCI device");
goto fail_enable_device;
}
// Enable device
ret = pci_enable_device_mem(pdev);
if (ret) {
dev_err(dev, "Failed to enable PCI device");
goto fail_enable_device;
}
// Set mask
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
{
dev_warn(dev, "Warning: failed to set 64 bit PCI DMA mask");
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
{
dev_err(dev, "Failed to set PCI DMA mask");
goto fail_regions;
}
}
// Set mask
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_warn(dev, "Warning: failed to set 64 bit PCI DMA mask");
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Failed to set PCI DMA mask");
goto fail_regions;
}
}
// Set max segment size
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
// Set max segment size
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
// Reserve regions
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret)
{
dev_err(dev, "Failed to reserve regions");
goto fail_regions;
}
// Reserve regions
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret) {
dev_err(dev, "Failed to reserve regions");
goto fail_regions;
}
mqnic->hw_regs_size = pci_resource_len(pdev, 0);
mqnic->hw_regs_phys = pci_resource_start(pdev, 0);
mqnic->app_hw_regs_size = pci_resource_len(pdev, 2);
mqnic->app_hw_regs_phys = pci_resource_start(pdev, 2);
mqnic->ram_hw_regs_size = pci_resource_len(pdev, 4);
mqnic->ram_hw_regs_phys = pci_resource_start(pdev, 4);
mqnic->hw_regs_size = pci_resource_len(pdev, 0);
mqnic->hw_regs_phys = pci_resource_start(pdev, 0);
mqnic->app_hw_regs_size = pci_resource_len(pdev, 2);
mqnic->app_hw_regs_phys = pci_resource_start(pdev, 2);
mqnic->ram_hw_regs_size = pci_resource_len(pdev, 4);
mqnic->ram_hw_regs_phys = pci_resource_start(pdev, 4);
// Map BARs
dev_info(dev, "Control BAR size: %llu", mqnic->hw_regs_size);
mqnic->hw_addr = pci_ioremap_bar(pdev, 0);
if (!mqnic->hw_addr)
{
ret = -ENOMEM;
dev_err(dev, "Failed to map control BAR");
goto fail_map_bars;
}
// Map BARs
dev_info(dev, "Control BAR size: %llu", mqnic->hw_regs_size);
mqnic->hw_addr = pci_ioremap_bar(pdev, 0);
if (!mqnic->hw_addr) {
ret = -ENOMEM;
dev_err(dev, "Failed to map control BAR");
goto fail_map_bars;
}
if (mqnic->app_hw_regs_size)
{
dev_info(dev, "Application BAR size: %llu", mqnic->app_hw_regs_size);
mqnic->app_hw_addr = pci_ioremap_bar(pdev, 2);
if (!mqnic->app_hw_addr)
{
ret = -ENOMEM;
dev_err(dev, "Failed to map application BAR");
goto fail_map_bars;
}
}
if (mqnic->app_hw_regs_size) {
dev_info(dev, "Application BAR size: %llu", mqnic->app_hw_regs_size);
mqnic->app_hw_addr = pci_ioremap_bar(pdev, 2);
if (!mqnic->app_hw_addr) {
ret = -ENOMEM;
dev_err(dev, "Failed to map application BAR");
goto fail_map_bars;
}
}
if (mqnic->ram_hw_regs_size)
{
dev_info(dev, "RAM BAR size: %llu", mqnic->ram_hw_regs_size);
mqnic->ram_hw_addr = pci_ioremap_bar(pdev, 4);
if (!mqnic->ram_hw_addr)
{
ret = -ENOMEM;
dev_err(dev, "Failed to map RAM BAR");
goto fail_map_bars;
}
}
if (mqnic->ram_hw_regs_size) {
dev_info(dev, "RAM BAR size: %llu", mqnic->ram_hw_regs_size);
mqnic->ram_hw_addr = pci_ioremap_bar(pdev, 4);
if (!mqnic->ram_hw_addr) {
ret = -ENOMEM;
dev_err(dev, "Failed to map RAM BAR");
goto fail_map_bars;
}
}
// Check if device needs to be reset
if (ioread32(mqnic->hw_addr) == 0xffffffff)
{
ret = -EIO;
dev_err(dev, "Device needs to be reset");
goto fail_map_bars;
}
// Check if device needs to be reset
if (ioread32(mqnic->hw_addr) == 0xffffffff) {
ret = -EIO;
dev_err(dev, "Device needs to be reset");
goto fail_map_bars;
}
// Read ID registers
mqnic->fw_id = ioread32(mqnic->hw_addr+MQNIC_REG_FW_ID);
dev_info(dev, "FW ID: 0x%08x", mqnic->fw_id);
mqnic->fw_ver = ioread32(mqnic->hw_addr+MQNIC_REG_FW_VER);
dev_info(dev, "FW version: %d.%d", mqnic->fw_ver >> 16, mqnic->fw_ver & 0xffff);
mqnic->board_id = ioread32(mqnic->hw_addr+MQNIC_REG_BOARD_ID);
dev_info(dev, "Board ID: 0x%08x", mqnic->board_id);
mqnic->board_ver = ioread32(mqnic->hw_addr+MQNIC_REG_BOARD_VER);
dev_info(dev, "Board version: %d.%d", mqnic->board_ver >> 16, mqnic->board_ver & 0xffff);
// Read ID registers
mqnic->fw_id = ioread32(mqnic->hw_addr + MQNIC_REG_FW_ID);
dev_info(dev, "FW ID: 0x%08x", mqnic->fw_id);
mqnic->fw_ver = ioread32(mqnic->hw_addr + MQNIC_REG_FW_VER);
dev_info(dev, "FW version: %d.%d", mqnic->fw_ver >> 16, mqnic->fw_ver & 0xffff);
mqnic->board_id = ioread32(mqnic->hw_addr + MQNIC_REG_BOARD_ID);
dev_info(dev, "Board ID: 0x%08x", mqnic->board_id);
mqnic->board_ver = ioread32(mqnic->hw_addr + MQNIC_REG_BOARD_VER);
dev_info(dev, "Board version: %d.%d", mqnic->board_ver >> 16, mqnic->board_ver & 0xffff);
mqnic->phc_count = ioread32(mqnic->hw_addr+MQNIC_REG_PHC_COUNT);
dev_info(dev, "PHC count: %d", mqnic->phc_count);
mqnic->phc_offset = ioread32(mqnic->hw_addr+MQNIC_REG_PHC_OFFSET);
dev_info(dev, "PHC offset: 0x%08x", mqnic->phc_offset);
mqnic->phc_count = ioread32(mqnic->hw_addr + MQNIC_REG_PHC_COUNT);
dev_info(dev, "PHC count: %d", mqnic->phc_count);
mqnic->phc_offset = ioread32(mqnic->hw_addr + MQNIC_REG_PHC_OFFSET);
dev_info(dev, "PHC offset: 0x%08x", mqnic->phc_offset);
if (mqnic->phc_count)
mqnic->phc_hw_addr = mqnic->hw_addr+mqnic->phc_offset;
if (mqnic->phc_count)
mqnic->phc_hw_addr = mqnic->hw_addr + mqnic->phc_offset;
mqnic->if_count = ioread32(mqnic->hw_addr+MQNIC_REG_IF_COUNT);
dev_info(dev, "IF count: %d", mqnic->if_count);
mqnic->if_stride = ioread32(mqnic->hw_addr+MQNIC_REG_IF_STRIDE);
dev_info(dev, "IF stride: 0x%08x", mqnic->if_stride);
mqnic->if_csr_offset = ioread32(mqnic->hw_addr+MQNIC_REG_IF_CSR_OFFSET);
dev_info(dev, "IF CSR offset: 0x%08x", mqnic->if_csr_offset);
mqnic->if_count = ioread32(mqnic->hw_addr + MQNIC_REG_IF_COUNT);
dev_info(dev, "IF count: %d", mqnic->if_count);
mqnic->if_stride = ioread32(mqnic->hw_addr + MQNIC_REG_IF_STRIDE);
dev_info(dev, "IF stride: 0x%08x", mqnic->if_stride);
mqnic->if_csr_offset = ioread32(mqnic->hw_addr + MQNIC_REG_IF_CSR_OFFSET);
dev_info(dev, "IF CSR offset: 0x%08x", mqnic->if_csr_offset);
// check BAR size
if (mqnic->if_count*mqnic->if_stride > mqnic->hw_regs_size)
{
ret = -EIO;
dev_err(dev, "Invalid BAR configuration (%d IF * 0x%x > 0x%llx)", mqnic->if_count, mqnic->if_stride, mqnic->hw_regs_size);
goto fail_map_bars;
}
// check BAR size
if (mqnic->if_count * mqnic->if_stride > mqnic->hw_regs_size) {
ret = -EIO;
dev_err(dev, "Invalid BAR configuration (%d IF * 0x%x > 0x%llx)",
mqnic->if_count, mqnic->if_stride, mqnic->hw_regs_size);
goto fail_map_bars;
}
// Allocate MSI IRQs
mqnic->irq_count = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (mqnic->irq_count < 0)
{
ret = -ENOMEM;
dev_err(dev, "Failed to allocate IRQs");
goto fail_map_bars;
}
// Allocate MSI IRQs
mqnic->irq_count = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (mqnic->irq_count < 0) {
ret = -ENOMEM;
dev_err(dev, "Failed to allocate IRQs");
goto fail_map_bars;
}
// Set up interrupts
for (k = 0; k < mqnic->irq_count; k++)
{
ret = pci_request_irq(pdev, k, mqnic_interrupt, NULL, mqnic, "%s-%d", mqnic->name, k);
if (ret < 0)
{
dev_err(dev, "Failed to request IRQ");
goto fail_irq;
}
// Set up interrupts
for (k = 0; k < mqnic->irq_count; k++) {
ret = pci_request_irq(pdev, k, mqnic_interrupt, NULL,
mqnic, "%s-%d", mqnic->name, k);
if (ret < 0) {
dev_err(dev, "Failed to request IRQ");
goto fail_irq;
}
mqnic->irq_map[k] = pci_irq_vector(pdev, k);
}
mqnic->irq_map[k] = pci_irq_vector(pdev, k);
}
// Board-specific init
ret = mqnic_board_init(mqnic);
if (ret)
{
dev_err(dev, "Failed to initialize board");
goto fail_board;
}
// Board-specific init
ret = mqnic_board_init(mqnic);
if (ret) {
dev_err(dev, "Failed to initialize board");
goto fail_board;
}
// Enable bus mastering for DMA
pci_set_master(pdev);
// Enable bus mastering for DMA
pci_set_master(pdev);
// register PHC
if (mqnic->phc_count)
{
mqnic_register_phc(mqnic);
}
// register PHC
if (mqnic->phc_count)
mqnic_register_phc(mqnic);
// Set up interfaces
if (mqnic->if_count > MQNIC_MAX_IF)
mqnic->if_count = MQNIC_MAX_IF;
// Set up interfaces
if (mqnic->if_count > MQNIC_MAX_IF)
mqnic->if_count = MQNIC_MAX_IF;
for (k = 0; k < mqnic->if_count; k++)
{
dev_info(dev, "Creating interface %d", k);
ret = mqnic_init_netdev(mqnic, k, mqnic->hw_addr + k*mqnic->if_stride);
if (ret)
{
dev_err(dev, "Failed to create net_device");
goto fail_init_netdev;
}
}
for (k = 0; k < mqnic->if_count; k++) {
dev_info(dev, "Creating interface %d", k);
ret = mqnic_init_netdev(mqnic, k, mqnic->hw_addr + k * mqnic->if_stride);
if (ret) {
dev_err(dev, "Failed to create net_device");
goto fail_init_netdev;
}
}
// pass module I2C clients to net_device instances
for (k = 0; k < mqnic->if_count; k++)
{
struct mqnic_priv *priv = netdev_priv(mqnic->ndev[k]);
priv->mod_i2c_client = mqnic->mod_i2c_client[k];
}
// pass module I2C clients to net_device instances
for (k = 0; k < mqnic->if_count; k++) {
struct mqnic_priv *priv = netdev_priv(mqnic->ndev[k]);
priv->mod_i2c_client = mqnic->mod_i2c_client[k];
}
mqnic->misc_dev.minor = MISC_DYNAMIC_MINOR;
mqnic->misc_dev.name = mqnic->name;
mqnic->misc_dev.fops = &mqnic_fops;
mqnic->misc_dev.parent = dev;
mqnic->misc_dev.minor = MISC_DYNAMIC_MINOR;
mqnic->misc_dev.name = mqnic->name;
mqnic->misc_dev.fops = &mqnic_fops;
mqnic->misc_dev.parent = dev;
ret = misc_register(&mqnic->misc_dev);
if (ret)
{
dev_err(dev, "misc_register failed: %d\n", ret);
goto fail_miscdev;
}
ret = misc_register(&mqnic->misc_dev);
if (ret) {
dev_err(dev, "misc_register failed: %d\n", ret);
goto fail_miscdev;
}
dev_info(dev, "Registered device %s", mqnic->name);
dev_info(dev, "Registered device %s", mqnic->name);
pci_save_state(pdev);
pci_save_state(pdev);
mutex_init(&mqnic->state_lock);
mutex_init(&mqnic->state_lock);
// probe complete
return 0;
// probe complete
return 0;
// error handling
// error handling
fail_miscdev:
fail_init_netdev:
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
{
if (mqnic->ndev[k])
{
mqnic_destroy_netdev(mqnic->ndev[k]);
}
}
mqnic_unregister_phc(mqnic);
pci_clear_master(pdev);
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
if (mqnic->ndev[k])
mqnic_destroy_netdev(mqnic->ndev[k]);
mqnic_unregister_phc(mqnic);
pci_clear_master(pdev);
fail_board:
mqnic_board_deinit(mqnic);
for (k = 0; k < mqnic->irq_count; k++)
{
pci_free_irq(pdev, k, mqnic);
}
mqnic_board_deinit(mqnic);
for (k = 0; k < mqnic->irq_count; k++)
pci_free_irq(pdev, k, mqnic);
fail_irq:
pci_free_irq_vectors(pdev);
pci_free_irq_vectors(pdev);
fail_map_bars:
if (mqnic->hw_addr)
pci_iounmap(pdev, mqnic->hw_addr);
if (mqnic->app_hw_addr)
pci_iounmap(pdev, mqnic->app_hw_addr);
if (mqnic->ram_hw_addr)
pci_iounmap(pdev, mqnic->ram_hw_addr);
pci_release_regions(pdev);
if (mqnic->hw_addr)
pci_iounmap(pdev, mqnic->hw_addr);
if (mqnic->app_hw_addr)
pci_iounmap(pdev, mqnic->app_hw_addr);
if (mqnic->ram_hw_addr)
pci_iounmap(pdev, mqnic->ram_hw_addr);
pci_release_regions(pdev);
fail_regions:
pci_disable_device(pdev);
pci_disable_device(pdev);
fail_enable_device:
spin_lock(&mqnic_devices_lock);
list_del(&mqnic->dev_list_node);
spin_unlock(&mqnic_devices_lock);
return ret;
spin_lock(&mqnic_devices_lock);
list_del(&mqnic->dev_list_node);
spin_unlock(&mqnic_devices_lock);
return ret;
}
static void mqnic_pci_remove(struct pci_dev *pdev)
{
struct mqnic_dev *mqnic = pci_get_drvdata(pdev);
struct mqnic_dev *mqnic = pci_get_drvdata(pdev);
int k = 0;
int k = 0;
dev_info(&pdev->dev, DRIVER_NAME " PCI remove");
dev_info(&pdev->dev, DRIVER_NAME " PCI remove");
misc_deregister(&mqnic->misc_dev);
misc_deregister(&mqnic->misc_dev);
spin_lock(&mqnic_devices_lock);
list_del(&mqnic->dev_list_node);
spin_unlock(&mqnic_devices_lock);
spin_lock(&mqnic_devices_lock);
list_del(&mqnic->dev_list_node);
spin_unlock(&mqnic_devices_lock);
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
{
if (mqnic->ndev[k])
{
mqnic_destroy_netdev(mqnic->ndev[k]);
}
}
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
if (mqnic->ndev[k])
mqnic_destroy_netdev(mqnic->ndev[k]);
mqnic_unregister_phc(mqnic);
mqnic_unregister_phc(mqnic);
pci_clear_master(pdev);
mqnic_board_deinit(mqnic);
for (k = 0; k < mqnic->irq_count; k++)
{
pci_free_irq(pdev, k, mqnic);
}
pci_free_irq_vectors(pdev);
if (mqnic->hw_addr)
pci_iounmap(pdev, mqnic->hw_addr);
if (mqnic->app_hw_addr)
pci_iounmap(pdev, mqnic->app_hw_addr);
if (mqnic->ram_hw_addr)
pci_iounmap(pdev, mqnic->ram_hw_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_clear_master(pdev);
mqnic_board_deinit(mqnic);
for (k = 0; k < mqnic->irq_count; k++)
pci_free_irq(pdev, k, mqnic);
pci_free_irq_vectors(pdev);
if (mqnic->hw_addr)
pci_iounmap(pdev, mqnic->hw_addr);
if (mqnic->app_hw_addr)
pci_iounmap(pdev, mqnic->app_hw_addr);
if (mqnic->ram_hw_addr)
pci_iounmap(pdev, mqnic->ram_hw_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static void mqnic_pci_shutdown(struct pci_dev *pdev)
{
dev_info(&pdev->dev, DRIVER_NAME " PCI shutdown");
dev_info(&pdev->dev, DRIVER_NAME " PCI shutdown");
mqnic_pci_remove(pdev);
mqnic_pci_remove(pdev);
}
static struct pci_driver mqnic_pci_driver = {
.name = DRIVER_NAME,
.id_table = mqnic_pci_id_table,
.probe = mqnic_pci_probe,
.remove = mqnic_pci_remove,
.shutdown = mqnic_pci_shutdown
.name = DRIVER_NAME,
.id_table = mqnic_pci_id_table,
.probe = mqnic_pci_probe,
.remove = mqnic_pci_remove,
.shutdown = mqnic_pci_shutdown
};
static int __init mqnic_init(void)
{
return pci_register_driver(&mqnic_pci_driver);
return pci_register_driver(&mqnic_pci_driver);
}
static void __exit mqnic_exit(void)
{
pci_unregister_driver(&mqnic_pci_driver);
pci_unregister_driver(&mqnic_pci_driver);
}
module_init(mqnic_init);
module_exit(mqnic_exit);

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,109 +33,107 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"
int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, int index, u8 __iomem *hw_addr)
int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_port *port;
struct device *dev = priv->dev;
struct mqnic_port *port;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
{
dev_err(dev, "Failed to allocate port");
return -ENOMEM;
}
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port) {
dev_err(dev, "Failed to allocate port");
return -ENOMEM;
}
*port_ptr = port;
*port_ptr = port;
port->dev = dev;
port->ndev = priv->ndev;
port->dev = dev;
port->ndev = priv->ndev;
port->index = index;
port->index = index;
port->tx_queue_count = priv->tx_queue_count;
port->tx_queue_count = priv->tx_queue_count;
port->hw_addr = hw_addr;
port->hw_addr = hw_addr;
// read ID registers
port->port_id = ioread32(port->hw_addr+MQNIC_PORT_REG_PORT_ID);
dev_info(dev, "Port ID: 0x%08x", port->port_id);
port->port_features = ioread32(port->hw_addr+MQNIC_PORT_REG_PORT_FEATURES);
dev_info(dev, "Port features: 0x%08x", port->port_features);
port->port_mtu = ioread32(port->hw_addr+MQNIC_PORT_REG_PORT_MTU);
dev_info(dev, "Port MTU: %d", port->port_mtu);
// read ID registers
port->port_id = ioread32(port->hw_addr + MQNIC_PORT_REG_PORT_ID);
dev_info(dev, "Port ID: 0x%08x", port->port_id);
port->port_features = ioread32(port->hw_addr + MQNIC_PORT_REG_PORT_FEATURES);
dev_info(dev, "Port features: 0x%08x", port->port_features);
port->port_mtu = ioread32(port->hw_addr + MQNIC_PORT_REG_PORT_MTU);
dev_info(dev, "Port MTU: %d", port->port_mtu);
port->sched_count = ioread32(port->hw_addr+MQNIC_PORT_REG_SCHED_COUNT);
dev_info(dev, "Scheduler count: %d", port->sched_count);
port->sched_offset = ioread32(port->hw_addr+MQNIC_PORT_REG_SCHED_OFFSET);
dev_info(dev, "Scheduler offset: 0x%08x", port->sched_offset);
port->sched_stride = ioread32(port->hw_addr+MQNIC_PORT_REG_SCHED_STRIDE);
dev_info(dev, "Scheduler stride: 0x%08x", port->sched_stride);
port->sched_type = ioread32(port->hw_addr+MQNIC_PORT_REG_SCHED_TYPE);
dev_info(dev, "Scheduler type: 0x%08x", port->sched_type);
port->sched_count = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_COUNT);
dev_info(dev, "Scheduler count: %d", port->sched_count);
port->sched_offset = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_OFFSET);
dev_info(dev, "Scheduler offset: 0x%08x", port->sched_offset);
port->sched_stride = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_STRIDE);
dev_info(dev, "Scheduler stride: 0x%08x", port->sched_stride);
port->sched_type = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_TYPE);
dev_info(dev, "Scheduler type: 0x%08x", port->sched_type);
mqnic_deactivate_port(port);
mqnic_deactivate_port(port);
return 0;
return 0;
}
void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr)
{
struct mqnic_port *port = *port_ptr;
*port_ptr = NULL;
struct mqnic_port *port = *port_ptr;
*port_ptr = NULL;
mqnic_deactivate_port(port);
mqnic_deactivate_port(port);
kfree(port);
kfree(port);
}
int mqnic_activate_port(struct mqnic_port *port)
{
int k;
int k;
// enable schedulers
iowrite32(0xffffffff, port->hw_addr+MQNIC_PORT_REG_SCHED_ENABLE);
// enable schedulers
iowrite32(0xffffffff, port->hw_addr + MQNIC_PORT_REG_SCHED_ENABLE);
// enable queues
for (k = 0; k < port->tx_queue_count; k++)
{
iowrite32(3, port->hw_addr+port->sched_offset+k*4);
}
// enable queues
for (k = 0; k < port->tx_queue_count; k++)
iowrite32(3, port->hw_addr + port->sched_offset + k * 4);
return 0;
return 0;
}
void mqnic_deactivate_port(struct mqnic_port *port)
{
// disable schedulers
iowrite32(0, port->hw_addr+MQNIC_PORT_REG_SCHED_ENABLE);
// disable schedulers
iowrite32(0, port->hw_addr + MQNIC_PORT_REG_SCHED_ENABLE);
}
u32 mqnic_port_get_rss_mask(struct mqnic_port *port)
{
return ioread32(port->hw_addr+MQNIC_PORT_REG_RSS_MASK);
return ioread32(port->hw_addr + MQNIC_PORT_REG_RSS_MASK);
}
void mqnic_port_set_rss_mask(struct mqnic_port *port, u32 rss_mask)
{
iowrite32(rss_mask, port->hw_addr+MQNIC_PORT_REG_RSS_MASK);
iowrite32(rss_mask, port->hw_addr + MQNIC_PORT_REG_RSS_MASK);
}
u32 mqnic_port_get_tx_mtu(struct mqnic_port *port)
{
return ioread32(port->hw_addr+MQNIC_PORT_REG_TX_MTU);
return ioread32(port->hw_addr + MQNIC_PORT_REG_TX_MTU);
}
void mqnic_port_set_tx_mtu(struct mqnic_port *port, u32 mtu)
{
iowrite32(mtu, port->hw_addr+MQNIC_PORT_REG_TX_MTU);
iowrite32(mtu, port->hw_addr + MQNIC_PORT_REG_TX_MTU);
}
u32 mqnic_port_get_rx_mtu(struct mqnic_port *port)
{
return ioread32(port->hw_addr+MQNIC_PORT_REG_RX_MTU);
return ioread32(port->hw_addr + MQNIC_PORT_REG_RX_MTU);
}
void mqnic_port_set_rx_mtu(struct mqnic_port *port, u32 mtu)
{
iowrite32(mtu, port->hw_addr+MQNIC_PORT_REG_RX_MTU);
iowrite32(mtu, port->hw_addr + MQNIC_PORT_REG_RX_MTU);
}


@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,271 +34,250 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"
#include <linux/version.h>
ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring, const struct mqnic_cpl *cpl)
ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
const struct mqnic_cpl *cpl)
{
u64 ts_s = le16_to_cpu(cpl->ts_s);
u32 ts_ns = le32_to_cpu(cpl->ts_ns);
u64 ts_s = le16_to_cpu(cpl->ts_s);
u32 ts_ns = le32_to_cpu(cpl->ts_ns);
if (unlikely(!ring->ts_valid || (ring->ts_s ^ ts_s) & 0xff00))
{
// seconds MSBs do not match, update cached timestamp
ring->ts_s = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_CUR_SEC_L);
ring->ts_s |= (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_CUR_SEC_H) << 32;
ring->ts_valid = 1;
}
if (unlikely(!ring->ts_valid || (ring->ts_s ^ ts_s) & 0xff00)) {
// seconds MSBs do not match, update cached timestamp
ring->ts_s = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_CUR_SEC_L);
ring->ts_s |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_CUR_SEC_H) << 32;
ring->ts_valid = 1;
}
ts_s |= ring->ts_s & 0xffffffffffffff00;
ts_s |= ring->ts_s & 0xffffffffffffff00;
return ktime_set(ts_s, ts_ns);
return ktime_set(ts_s, ts_ns);
}
static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
bool neg = false;
u64 nom_per_fns, adj;
bool neg = false;
u64 nom_per_fns, adj;
dev_info(mdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm);
dev_info(mdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm);
if (scaled_ppm < 0)
{
neg = true;
scaled_ppm = -scaled_ppm;
}
if (scaled_ppm < 0) {
neg = true;
scaled_ppm = -scaled_ppm;
}
nom_per_fns = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_NOM_PERIOD_FNS);
nom_per_fns = (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_NOM_PERIOD_NS) << 32;
nom_per_fns = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_NOM_PERIOD_FNS);
nom_per_fns = (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_NOM_PERIOD_NS) << 32;
if (nom_per_fns == 0)
nom_per_fns = 0x4ULL << 32;
if (nom_per_fns == 0)
nom_per_fns = 0x4ULL << 32;
adj = div_u64(((nom_per_fns >> 16) * scaled_ppm) + 500000, 1000000);
adj = div_u64(((nom_per_fns >> 16) * scaled_ppm) + 500000, 1000000);
if (neg)
{
adj = nom_per_fns - adj;
}
else
{
adj = nom_per_fns + adj;
}
if (neg)
adj = nom_per_fns - adj;
else
adj = nom_per_fns + adj;
iowrite32(adj & 0xffffffff, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_PERIOD_FNS);
iowrite32(adj >> 32, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_PERIOD_NS);
iowrite32(adj & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_FNS);
iowrite32(adj >> 32, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_NS);
dev_info(mdev->dev, "mqnic_phc_adjfine adj: 0x%llx", adj);
dev_info(mdev->dev, "mqnic_phc_adjfine adj: 0x%llx", adj);
return 0;
return 0;
}
static int mqnic_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_FNS);
ts->tv_nsec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_NS);
ts->tv_sec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_L);
ts->tv_sec |= (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;
ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_FNS);
ts->tv_nsec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_NS);
ts->tv_sec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_L);
ts->tv_sec |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;
return 0;
return 0;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
static int mqnic_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
static int mqnic_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
ptp_read_system_prets(sts);
ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_FNS);
ptp_read_system_postts(sts);
ts->tv_nsec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_NS);
ts->tv_sec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_L);
ts->tv_sec |= (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;
ptp_read_system_prets(sts);
ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_FNS);
ptp_read_system_postts(sts);
ts->tv_nsec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_NS);
ts->tv_sec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_L);
ts->tv_sec |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;
return 0;
return 0;
}
#endif
static int mqnic_phc_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
iowrite32(0, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_FNS);
iowrite32(ts->tv_nsec, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_NS);
iowrite32(ts->tv_sec & 0xffffffff, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_SEC_L);
iowrite32(ts->tv_sec >> 32, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_SEC_H);
iowrite32(0, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_FNS);
iowrite32(ts->tv_nsec, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_NS);
iowrite32(ts->tv_sec & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_SEC_L);
iowrite32(ts->tv_sec >> 32, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_SEC_H);
return 0;
return 0;
}
static int mqnic_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct timespec64 ts;
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct timespec64 ts;
dev_info(mdev->dev, "mqnic_phc_adjtime delta: %lld", delta);
dev_info(mdev->dev, "mqnic_phc_adjtime delta: %lld", delta);
if (delta > 1000000000 || delta < -1000000000)
{
mqnic_phc_gettime(ptp, &ts);
ts = timespec64_add(ts, ns_to_timespec64(delta));
mqnic_phc_settime(ptp, &ts);
}
else
{
iowrite32(0, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_ADJ_FNS);
iowrite32(delta & 0xffffffff, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_ADJ_NS);
iowrite32(1, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_ADJ_COUNT);
}
if (delta > 1000000000 || delta < -1000000000) {
mqnic_phc_gettime(ptp, &ts);
ts = timespec64_add(ts, ns_to_timespec64(delta));
mqnic_phc_settime(ptp, &ts);
} else {
iowrite32(0, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_FNS);
iowrite32(delta & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_NS);
iowrite32(1, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_COUNT);
}
return 0;
return 0;
}
static int mqnic_phc_perout(struct ptp_clock_info *ptp, int on, struct ptp_perout_request *perout)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
u8 __iomem *hw_addr;
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
u8 __iomem *hw_addr;
u64 start_sec, period_sec, width_sec;
u32 start_nsec, period_nsec, width_nsec;
u64 start_sec, period_sec, width_sec;
u32 start_nsec, period_nsec, width_nsec;
if (perout->index >= mdev->ptp_clock_info.n_per_out)
{
return -EINVAL;
}
if (perout->index >= mdev->ptp_clock_info.n_per_out)
return -EINVAL;
hw_addr = mdev->phc_hw_addr + MQNIC_PHC_PEROUT_OFFSET;
hw_addr = mdev->phc_hw_addr + MQNIC_PHC_PEROUT_OFFSET;
if (!on)
{
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_CTRL);
if (!on) {
iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_CTRL);
return 0;
}
return 0;
}
start_nsec = perout->start.nsec;
start_sec = start_nsec / NSEC_PER_SEC;
start_nsec -= start_sec * NSEC_PER_SEC;
start_sec += perout->start.sec;
start_nsec = perout->start.nsec;
start_sec = start_nsec / NSEC_PER_SEC;
start_nsec -= start_sec * NSEC_PER_SEC;
start_sec += perout->start.sec;
period_nsec = perout->period.nsec;
period_sec = period_nsec / NSEC_PER_SEC;
period_nsec -= period_sec * NSEC_PER_SEC;
period_sec += perout->period.sec;
period_nsec = perout->period.nsec;
period_sec = period_nsec / NSEC_PER_SEC;
period_nsec -= period_sec * NSEC_PER_SEC;
period_sec += perout->period.sec;
// set width to half of period
width_sec = period_sec >> 1;
width_nsec = (period_nsec + (period_sec & 1 ? NSEC_PER_SEC : 0)) >> 1;
// set width to half of period
width_sec = period_sec >> 1;
width_nsec = (period_nsec + (period_sec & 1 ? NSEC_PER_SEC : 0)) >> 1;
dev_info(mdev->dev, "mqnic_phc_perout start: %lld.%09d", start_sec, start_nsec);
dev_info(mdev->dev, "mqnic_phc_perout period: %lld.%09d", period_sec, period_nsec);
dev_info(mdev->dev, "mqnic_phc_perout width: %lld.%09d", width_sec, width_nsec);
dev_info(mdev->dev, "mqnic_phc_perout start: %lld.%09d", start_sec, start_nsec);
dev_info(mdev->dev, "mqnic_phc_perout period: %lld.%09d", period_sec, period_nsec);
dev_info(mdev->dev, "mqnic_phc_perout width: %lld.%09d", width_sec, width_nsec);
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_START_FNS);
iowrite32(start_nsec, hw_addr+MQNIC_PHC_REG_PEROUT_START_NS);
iowrite32(start_sec & 0xffffffff, hw_addr+MQNIC_PHC_REG_PEROUT_START_SEC_L);
iowrite32(start_sec >> 32, hw_addr+MQNIC_PHC_REG_PEROUT_START_SEC_H);
iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_START_FNS);
iowrite32(start_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_START_NS);
iowrite32(start_sec & 0xffffffff, hw_addr + MQNIC_PHC_REG_PEROUT_START_SEC_L);
iowrite32(start_sec >> 32, hw_addr + MQNIC_PHC_REG_PEROUT_START_SEC_H);
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_FNS);
iowrite32(period_nsec, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_NS);
iowrite32(period_sec & 0xffffffff, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_SEC_L);
iowrite32(period_sec >> 32, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_SEC_H);
iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_FNS);
iowrite32(period_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_NS);
iowrite32(period_sec & 0xffffffff, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_SEC_L);
iowrite32(period_sec >> 32, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_SEC_H);
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_FNS);
iowrite32(width_nsec, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_NS);
iowrite32(width_sec & 0xffffffff, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_SEC_L);
iowrite32(width_sec >> 32, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_SEC_H);
iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_FNS);
iowrite32(width_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_NS);
iowrite32(width_sec & 0xffffffff, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_SEC_L);
iowrite32(width_sec >> 32, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_SEC_H);
iowrite32(1, hw_addr+MQNIC_PHC_REG_PEROUT_CTRL);
iowrite32(1, hw_addr + MQNIC_PHC_REG_PEROUT_CTRL);
return 0;
return 0;
}
static int mqnic_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
if (request)
{
switch (request->type)
{
case PTP_CLK_REQ_EXTTS:
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
return mqnic_phc_perout(ptp, on, &request->perout);
case PTP_CLK_REQ_PPS:
return -EINVAL;
default:
return -EINVAL;
}
}
else
{
return -EINVAL;
}
if (!request)
return -EINVAL;
switch (request->type) {
case PTP_CLK_REQ_EXTTS:
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
return mqnic_phc_perout(ptp, on, &request->perout);
case PTP_CLK_REQ_PPS:
return -EINVAL;
default:
return -EINVAL;
}
}
static void mqnic_phc_set_from_system_clock(struct ptp_clock_info *ptp)
{
struct timespec64 ts;
struct timespec64 ts;
#ifdef ktime_get_clocktai_ts64
ktime_get_clocktai_ts64(&ts);
#ifdef ktime_get_clocktai_ts64
ktime_get_clocktai_ts64(&ts);
#else
ts = ktime_to_timespec64(ktime_get_clocktai());
ts = ktime_to_timespec64(ktime_get_clocktai());
#endif
mqnic_phc_settime(ptp, &ts);
mqnic_phc_settime(ptp, &ts);
}
void mqnic_register_phc(struct mqnic_dev *mdev)
{
u32 phc_features;
u32 phc_features;
if (mdev->ptp_clock)
{
return;
}
if (mdev->ptp_clock) {
dev_warn(mdev->dev, "PTP clock already registered");
return;
}
phc_features = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_FEATURES);
phc_features = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_FEATURES);
mdev->ptp_clock_info.owner = THIS_MODULE;
mdev->ptp_clock_info.max_adj = 100000000,
mdev->ptp_clock_info.n_alarm = 0,
mdev->ptp_clock_info.n_ext_ts = 0,
mdev->ptp_clock_info.n_per_out = phc_features & 0xff,
mdev->ptp_clock_info.n_pins = 0,
mdev->ptp_clock_info.pps = 0,
mdev->ptp_clock_info.adjfine = mqnic_phc_adjfine,
mdev->ptp_clock_info.adjtime = mqnic_phc_adjtime,
mdev->ptp_clock_info.gettime64 = mqnic_phc_gettime,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
mdev->ptp_clock_info.gettimex64 = mqnic_phc_gettimex,
mdev->ptp_clock_info.owner = THIS_MODULE;
mdev->ptp_clock_info.max_adj = 100000000;
mdev->ptp_clock_info.n_alarm = 0;
mdev->ptp_clock_info.n_ext_ts = 0;
mdev->ptp_clock_info.n_per_out = phc_features & 0xff;
mdev->ptp_clock_info.n_pins = 0;
mdev->ptp_clock_info.pps = 0;
mdev->ptp_clock_info.adjfine = mqnic_phc_adjfine;
mdev->ptp_clock_info.adjtime = mqnic_phc_adjtime;
mdev->ptp_clock_info.gettime64 = mqnic_phc_gettime;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
mdev->ptp_clock_info.gettimex64 = mqnic_phc_gettimex;
#endif
mdev->ptp_clock_info.settime64 = mqnic_phc_settime,
mdev->ptp_clock_info.enable = mqnic_phc_enable,
mdev->ptp_clock_info.settime64 = mqnic_phc_settime;
mdev->ptp_clock_info.enable = mqnic_phc_enable;
mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, mdev->dev);
mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, mdev->dev);
if (IS_ERR(mdev->ptp_clock)) {
mdev->ptp_clock = NULL;
dev_err(mdev->dev, "ptp_clock_register failed");
} else {
dev_info(mdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));
if (IS_ERR(mdev->ptp_clock))
{
mdev->ptp_clock = NULL;
dev_err(mdev->dev, "ptp_clock_register failed");
}
else
{
dev_info(mdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));
mqnic_phc_set_from_system_clock(&mdev->ptp_clock_info);
}
mqnic_phc_set_from_system_clock(&mdev->ptp_clock_info);
}
}
void mqnic_unregister_phc(struct mqnic_dev *mdev)
{
if (mdev->ptp_clock)
{
ptp_clock_unregister(mdev->ptp_clock);
mdev->ptp_clock = NULL;
dev_info(mdev->dev, "unregistered PHC");
}
if (mdev->ptp_clock) {
ptp_clock_unregister(mdev->ptp_clock);
mdev->ptp_clock = NULL;
dev_info(mdev->dev, "unregistered PHC");
}
}


@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,392 +33,386 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"
int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring;
int ret;
struct device *dev = priv->dev;
struct mqnic_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate RX ring");
return -ENOMEM;
}
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
dev_err(dev, "Failed to allocate RX ring");
return -ENOMEM;
}
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size - 1;
ring->stride = roundup_pow_of_two(stride);
ring->desc_block_size = ring->stride/MQNIC_DESC_SIZE;
ring->log_desc_block_size = ring->desc_block_size < 2 ? 0 : ilog2(ring->desc_block_size-1)+1;
ring->desc_block_size = 1 << ring->log_desc_block_size;
ring->desc_block_size = ring->stride / MQNIC_DESC_SIZE;
ring->log_desc_block_size = ring->desc_block_size < 2 ? 0 : ilog2(ring->desc_block_size - 1) + 1;
ring->desc_block_size = 1 << ring->log_desc_block_size;
ring->rx_info = kvzalloc(sizeof(*ring->rx_info)*ring->size, GFP_KERNEL);
if (!ring->rx_info)
{
dev_err(dev, "Failed to allocate rx_info");
ret = -ENOMEM;
goto fail_ring;
}
ring->rx_info = kvzalloc(sizeof(*ring->rx_info) * ring->size, GFP_KERNEL);
if (!ring->rx_info) {
dev_err(dev, "Failed to allocate rx_info");
ret = -ENOMEM;
goto fail_ring;
}
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate RX ring DMA buffer");
ret = -ENOMEM;
goto fail_info;
}
ring->buf_size = ring->size * ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size,
&ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf) {
dev_err(dev, "Failed to allocate RX ring DMA buffer");
ret = -ENOMEM;
goto fail_info;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_QUEUE_TAIL_PTR_REG;
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr + MQNIC_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr + MQNIC_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
// set completion queue index
iowrite32(0, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
*ring_ptr = ring;
return 0;
fail_info:
kvfree(ring->rx_info);
ring->rx_info = NULL;
kvfree(ring->rx_info);
ring->rx_info = NULL;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
kfree(ring);
*ring_ptr = NULL;
return ret;
}
void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring = *ring_ptr;
*ring_ptr = NULL;
struct device *dev = priv->dev;
struct mqnic_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_rx_ring(priv, ring);
mqnic_deactivate_rx_ring(priv, ring);
mqnic_free_rx_buf(priv, ring);
mqnic_free_rx_buf(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kvfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kvfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
}
int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index)
int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
int cpl_index)
{
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(cpl_index, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
// set completion queue index
iowrite32(cpl_index, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
mqnic_refill_rx_buffers(priv, ring);
mqnic_refill_rx_buffers(priv, ring);
return 0;
return 0;
}
void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
}
bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring)
{
return ring->head_ptr == ring->clean_tail_ptr;
return ring->head_ptr == ring->clean_tail_ptr;
}
bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring)
{
return ring->head_ptr - ring->clean_tail_ptr >= ring->size;
return ring->head_ptr - ring->clean_tail_ptr >= ring->size;
}
void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring)
{
ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
}
void mqnic_rx_write_head_ptr(struct mqnic_ring *ring)
{
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
}
void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index)
void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
int index)
{
struct mqnic_rx_info *rx_info = &ring->rx_info[index];
struct page *page = rx_info->page;
struct mqnic_rx_info *rx_info = &ring->rx_info[index];
struct page *page = rx_info->page;
dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr), dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
rx_info->dma_addr = 0;
__free_pages(page, rx_info->page_order);
rx_info->page = NULL;
dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
rx_info->dma_addr = 0;
__free_pages(page, rx_info->page_order);
rx_info->page = NULL;
}
int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
u32 index;
int cnt = 0;
u32 index;
int cnt = 0;
while (!mqnic_is_rx_ring_empty(ring))
{
index = ring->clean_tail_ptr & ring->size_mask;
mqnic_free_rx_desc(priv, ring, index);
ring->clean_tail_ptr++;
cnt++;
}
while (!mqnic_is_rx_ring_empty(ring)) {
index = ring->clean_tail_ptr & ring->size_mask;
mqnic_free_rx_desc(priv, ring, index);
ring->clean_tail_ptr++;
cnt++;
}
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
return cnt;
return cnt;
}
int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index)
int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
int index)
{
struct mqnic_rx_info *rx_info = &ring->rx_info[index];
struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index*ring->stride);
struct page *page = rx_info->page;
u32 page_order = ring->page_order;
u32 len = PAGE_SIZE << page_order;
dma_addr_t dma_addr;
struct mqnic_rx_info *rx_info = &ring->rx_info[index];
struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);
struct page *page = rx_info->page;
u32 page_order = ring->page_order;
u32 len = PAGE_SIZE << page_order;
dma_addr_t dma_addr;
if (unlikely(page))
{
dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d", priv->port);
return -1;
}
if (unlikely(page)) {
dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d",
priv->port);
return -1;
}
page = dev_alloc_pages(page_order);
if (unlikely(!page))
{
dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d", priv->port);
return -1;
}
page = dev_alloc_pages(page_order);
if (unlikely(!page)) {
dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d",
priv->port);
return -1;
}
// map page
dma_addr = dma_map_page(priv->dev, page, 0, len, PCI_DMA_FROMDEVICE);
// map page
dma_addr = dma_map_page(priv->dev, page, 0, len, PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
{
dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d", priv->port);
__free_pages(page, page_order);
return -1;
}
if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d",
priv->port);
__free_pages(page, page_order);
return -1;
}
// write descriptor
rx_desc->len = cpu_to_le32(len);
rx_desc->addr = cpu_to_le64(dma_addr);
// write descriptor
rx_desc->len = cpu_to_le32(len);
rx_desc->addr = cpu_to_le64(dma_addr);
// update rx_info
rx_info->page = page;
rx_info->page_order = page_order;
rx_info->page_offset = 0;
rx_info->dma_addr = dma_addr;
rx_info->len = len;
// update rx_info
rx_info->page = page;
rx_info->page_order = page_order;
rx_info->page_offset = 0;
rx_info->dma_addr = dma_addr;
rx_info->len = len;
return 0;
return 0;
}
void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
u32 missing = ring->size - (ring->head_ptr - ring->clean_tail_ptr);
u32 missing = ring->size - (ring->head_ptr - ring->clean_tail_ptr);
if (missing < 8)
return;
if (missing < 8)
return;
for ( ; missing-- > 0; )
{
if (mqnic_prepare_rx_desc(priv, ring, ring->head_ptr & ring->size_mask))
break;
ring->head_ptr++;
}
for (; missing-- > 0;) {
if (mqnic_prepare_rx_desc(priv, ring, ring->head_ptr & ring->size_mask))
break;
ring->head_ptr++;
}
// enqueue on NIC
dma_wmb();
mqnic_rx_write_head_ptr(ring);
// enqueue on NIC
dma_wmb();
mqnic_rx_write_head_ptr(ring);
}
int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget)
int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
int napi_budget)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
struct mqnic_rx_info *rx_info;
struct mqnic_cpl *cpl;
struct sk_buff *skb;
struct page *page;
u32 cq_index;
u32 cq_tail_ptr;
u32 ring_index;
u32 ring_clean_tail_ptr;
int done = 0;
int budget = napi_budget;
u32 len;
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
struct mqnic_rx_info *rx_info;
struct mqnic_cpl *cpl;
struct sk_buff *skb;
struct page *page;
u32 cq_index;
u32 cq_tail_ptr;
u32 ring_index;
u32 ring_clean_tail_ptr;
int done = 0;
int budget = napi_budget;
u32 len;
if (unlikely(!priv->port_up))
{
return done;
}
if (unlikely(!priv->port_up))
return done;
// process completion queue
// read head pointer from NIC
mqnic_cq_read_head_ptr(cq_ring);
// process completion queue
// read head pointer from NIC
mqnic_cq_read_head_ptr(cq_ring);
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
mb(); // is a barrier here necessary? If so, what kind?
mb(); // is a barrier here necessary? If so, what kind?
while (cq_ring->head_ptr != cq_tail_ptr && done < budget)
{
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index*cq_ring->stride);
ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
rx_info = &ring->rx_info[ring_index];
page = rx_info->page;
while (cq_ring->head_ptr != cq_tail_ptr && done < budget) {
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
rx_info = &ring->rx_info[ring_index];
page = rx_info->page;
if (unlikely(!page))
{
dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d", cq_ring->ring_index, ring_index);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, cpl, MQNIC_CPL_SIZE, true);
break;
}
if (unlikely(!page)) {
dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d",
cq_ring->ring_index, ring_index);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
cpl, MQNIC_CPL_SIZE, true);
break;
}
skb = napi_get_frags(&cq_ring->napi);
if (unlikely(!skb))
{
dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb", cq_ring->ring_index);
break;
}
skb = napi_get_frags(&cq_ring->napi);
if (unlikely(!skb)) {
dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb",
cq_ring->ring_index);
break;
}
// RX hardware timestamp
if (priv->if_features & MQNIC_IF_FEATURE_PTP_TS)
{
skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
}
// RX hardware timestamp
if (priv->if_features & MQNIC_IF_FEATURE_PTP_TS)
skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
skb_record_rx_queue(skb, cq_ring->ring_index);
skb_record_rx_queue(skb, cq_ring->ring_index);
// RX hardware checksum
if (ndev->features & NETIF_F_RXCSUM)
{
skb->csum = csum_unfold((__sum16)cpu_to_be16(le16_to_cpu(cpl->rx_csum)));
skb->ip_summed = CHECKSUM_COMPLETE;
}
// RX hardware checksum
if (ndev->features & NETIF_F_RXCSUM) {
skb->csum = csum_unfold((__sum16) cpu_to_be16(le16_to_cpu(cpl->rx_csum)));
skb->ip_summed = CHECKSUM_COMPLETE;
}
// unmap
dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr), dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
rx_info->dma_addr = 0;
// unmap
dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
rx_info->dma_addr = 0;
len = min_t(u32, le16_to_cpu(cpl->len), rx_info->len);
len = min_t(u32, le16_to_cpu(cpl->len), rx_info->len);
dma_sync_single_range_for_cpu(priv->dev, rx_info->dma_addr, rx_info->page_offset, rx_info->len, PCI_DMA_FROMDEVICE);
dma_sync_single_range_for_cpu(priv->dev, rx_info->dma_addr, rx_info->page_offset,
rx_info->len, PCI_DMA_FROMDEVICE);
__skb_fill_page_desc(skb, 0, page, rx_info->page_offset, len);
rx_info->page = NULL;
__skb_fill_page_desc(skb, 0, page, rx_info->page_offset, len);
rx_info->page = NULL;
skb_shinfo(skb)->nr_frags = 1;
skb->len = len;
skb->data_len = len;
skb->truesize += rx_info->len;
skb_shinfo(skb)->nr_frags = 1;
skb->len = len;
skb->data_len = len;
skb->truesize += rx_info->len;
// hand off SKB
napi_gro_frags(&cq_ring->napi);
// hand off SKB
napi_gro_frags(&cq_ring->napi);
ring->packets++;
ring->bytes += le16_to_cpu(cpl->len);
ring->packets++;
ring->bytes += le16_to_cpu(cpl->len);
done++;
done++;
cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
}
cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
}
// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
// process ring
// read tail pointer from NIC
mqnic_rx_read_tail_ptr(ring);
// process ring
// read tail pointer from NIC
mqnic_rx_read_tail_ptr(ring);
ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
ring_index = ring_clean_tail_ptr & ring->size_mask;
ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
ring_index = ring_clean_tail_ptr & ring->size_mask;
while (ring_clean_tail_ptr != ring->tail_ptr)
{
rx_info = &ring->rx_info[ring_index];
while (ring_clean_tail_ptr != ring->tail_ptr) {
rx_info = &ring->rx_info[ring_index];
if (rx_info->page)
break;
if (rx_info->page)
break;
ring_clean_tail_ptr++;
ring_index = ring_clean_tail_ptr & ring->size_mask;
}
ring_clean_tail_ptr++;
ring_index = ring_clean_tail_ptr & ring->size_mask;
}
// update ring tail
WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);
// update ring tail
WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);
// replenish buffers
mqnic_refill_rx_buffers(priv, ring);
// replenish buffers
mqnic_refill_rx_buffers(priv, ring);
return done;
return done;
}
void mqnic_rx_irq(struct mqnic_cq_ring *cq)
{
struct mqnic_priv *priv = netdev_priv(cq->ndev);
struct mqnic_priv *priv = netdev_priv(cq->ndev);
if (likely(priv->port_up))
{
napi_schedule_irqoff(&cq->napi);
}
else
{
mqnic_arm_cq(cq);
}
if (likely(priv->port_up))
napi_schedule_irqoff(&cq->napi);
else
mqnic_arm_cq(cq);
}
int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
{
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct net_device *ndev = cq_ring->ndev;
int done;
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct net_device *ndev = cq_ring->ndev;
int done;
done = mqnic_process_rx_cq(ndev, cq_ring, budget);
done = mqnic_process_rx_cq(ndev, cq_ring, budget);
if (done == budget)
{
return done;
}
if (done == budget)
return done;
napi_complete(napi);
napi_complete(napi);
mqnic_arm_cq(cq_ring);
mqnic_arm_cq(cq_ring);
return done;
return done;
}


@@ -1,6 +1,6 @@
/*
Copyright 2019, The Regents of the University of California.
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,520 +34,487 @@ either expressed or implied, of The Regents of the University of California.
#include <linux/version.h>
#include "mqnic.h"
int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring;
int ret;
struct device *dev = priv->dev;
struct mqnic_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate TX ring");
return -ENOMEM;
}
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
dev_err(dev, "Failed to allocate TX ring");
return -ENOMEM;
}
ring->size = roundup_pow_of_two(size);
ring->full_size = ring->size >> 1;
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->size = roundup_pow_of_two(size);
ring->full_size = ring->size >> 1;
ring->size_mask = ring->size - 1;
ring->stride = roundup_pow_of_two(stride);
ring->desc_block_size = ring->stride/MQNIC_DESC_SIZE;
ring->log_desc_block_size = ring->desc_block_size < 2 ? 0 : ilog2(ring->desc_block_size-1)+1;
ring->desc_block_size = 1 << ring->log_desc_block_size;
ring->desc_block_size = ring->stride / MQNIC_DESC_SIZE;
ring->log_desc_block_size = ring->desc_block_size < 2 ? 0 : ilog2(ring->desc_block_size - 1) + 1;
ring->desc_block_size = 1 << ring->log_desc_block_size;
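Note: the stride passed to mqnic_create_tx_ring() is rounded up to a power of two and then expressed as a number of fixed-size descriptors per block; log_desc_block_size is the ceiling log2 of stride / MQNIC_DESC_SIZE, computed here as ilog2(n - 1) + 1. A standalone sketch of that arithmetic (the 16-byte descriptor size is an assumption for the example):

#include <stdio.h>
#include <stdint.h>

#define DESC_SIZE 16u	/* assumed descriptor size for this example */

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t stride = 128;				/* bytes per descriptor block */
	uint32_t blk = stride / DESC_SIZE;		/* raw descriptors per block */
	/* round up to a power of two, as the driver does with ilog2(n - 1) + 1 */
	uint32_t log_blk = blk < 2 ? 0 : ilog2_u32(blk - 1) + 1;

	printf("stride %u -> %u descriptors per block (log2 = %u)\n",
	       stride, 1u << log_blk, log_blk);
	return 0;
}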
ring->tx_info = kvzalloc(sizeof(*ring->tx_info)*ring->size, GFP_KERNEL);
if (!ring->tx_info)
{
dev_err(dev, "Failed to allocate tx_info");
ret = -ENOMEM;
goto fail_ring;
}
ring->tx_info = kvzalloc(sizeof(*ring->tx_info) * ring->size, GFP_KERNEL);
if (!ring->tx_info) {
dev_err(dev, "Failed to allocate tx_info");
ret = -ENOMEM;
goto fail_ring;
}
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate TX ring DMA buffer");
ret = -ENOMEM;
goto fail_info;
}
ring->buf_size = ring->size * ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size,
&ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf) {
dev_err(dev, "Failed to allocate TX ring DMA buffer");
ret = -ENOMEM;
goto fail_info;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_QUEUE_TAIL_PTR_REG;
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr + MQNIC_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr + MQNIC_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
// set completion queue index
iowrite32(0, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
*ring_ptr = ring;
return 0;
fail_info:
kvfree(ring->tx_info);
ring->tx_info = NULL;
kvfree(ring->tx_info);
ring->tx_info = NULL;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
kfree(ring);
*ring_ptr = NULL;
return ret;
}
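Note: mqnic_create_tx_ring() unwinds partial allocations through ordered goto labels, so each failure path frees exactly what was already set up and nothing more. A reduced sketch of the shape (hypothetical resources, not driver code):

#include <stdlib.h>

struct thing { void *a; void *b; };

static int thing_create(struct thing **out)
{
	struct thing *t;
	int ret;

	t = calloc(1, sizeof(*t));
	if (!t)
		return -1;

	t->a = malloc(64);
	if (!t->a) {
		ret = -1;
		goto fail_thing;	/* nothing but 't' to undo */
	}

	t->b = malloc(64);
	if (!t->b) {
		ret = -1;
		goto fail_a;		/* undo 'a', then 't' */
	}

	*out = t;
	return 0;

fail_a:
	free(t->a);
fail_thing:
	free(t);
	*out = NULL;
	return ret;
}

int main(void)
{
	struct thing *t;
	return thing_create(&t);
}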
void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring = *ring_ptr;
*ring_ptr = NULL;
struct device *dev = priv->dev;
struct mqnic_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_tx_ring(priv, ring);
mqnic_deactivate_tx_ring(priv, ring);
mqnic_free_tx_buf(priv, ring);
mqnic_free_tx_buf(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kvfree(ring->tx_info);
ring->tx_info = NULL;
kfree(ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kvfree(ring->tx_info);
ring->tx_info = NULL;
kfree(ring);
}
int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index)
int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
int cpl_index)
{
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(cpl_index, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
// set completion queue index
iowrite32(cpl_index, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
return 0;
return 0;
}
void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// deactivate queue
iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
}
bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring)
{
return ring->head_ptr == ring->clean_tail_ptr;
return ring->head_ptr == ring->clean_tail_ptr;
}
bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring)
{
return ring->head_ptr - ring->clean_tail_ptr >= ring->full_size;
return ring->head_ptr - ring->clean_tail_ptr >= ring->full_size;
}
void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring)
{
ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
}
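Note: head_ptr, tail_ptr and clean_tail_ptr are free-running 32-bit counters that are only masked with size_mask when indexing the ring, while the hardware registers expose just the low 16 bits (hw_ptr_mask = 0xffff). mqnic_tx_read_tail_ptr() widens the 16-bit value back into the full counter by adding the masked difference, which is safe across the 16-bit wrap. A standalone illustration of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t hw_ptr_mask = 0xffff;

	uint32_t sw_tail = 0x0001fff0;		/* free-running software counter */
	uint32_t hw_tail = 0x0010;		/* 16-bit value read from the NIC */

	/* advance by the masked difference; correct across the 16-bit wrap */
	sw_tail += (hw_tail - sw_tail) & hw_ptr_mask;

	printf("widened tail = 0x%08x\n", sw_tail);	/* prints 0x00020010 */
	return 0;
}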
void mqnic_tx_write_head_ptr(struct mqnic_ring *ring)
{
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
}
void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index, int napi_budget)
void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
int index, int napi_budget)
{
struct mqnic_tx_info *tx_info = &ring->tx_info[index];
struct sk_buff *skb = tx_info->skb;
u32 i;
struct mqnic_tx_info *tx_info = &ring->tx_info[index];
struct sk_buff *skb = tx_info->skb;
u32 i;
prefetchw(&skb->users);
prefetchw(&skb->users);
dma_unmap_single(priv->dev, dma_unmap_addr(tx_info, dma_addr), dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
dma_unmap_addr_set(tx_info, dma_addr, 0);
dma_unmap_single(priv->dev, dma_unmap_addr(tx_info, dma_addr),
dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
dma_unmap_addr_set(tx_info, dma_addr, 0);
// unmap frags
for (i = 0; i < tx_info->frag_count; i++)
{
dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr, tx_info->frags[i].len, PCI_DMA_TODEVICE);
}
// unmap frags
for (i = 0; i < tx_info->frag_count; i++)
dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
tx_info->frags[i].len, PCI_DMA_TODEVICE);
napi_consume_skb(skb, napi_budget);
tx_info->skb = NULL;
napi_consume_skb(skb, napi_budget);
tx_info->skb = NULL;
}
int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
u32 index;
int cnt = 0;
u32 index;
int cnt = 0;
while (!mqnic_is_tx_ring_empty(ring))
{
index = ring->clean_tail_ptr & ring->size_mask;
mqnic_free_tx_desc(priv, ring, index, 0);
ring->clean_tail_ptr++;
cnt++;
}
while (!mqnic_is_tx_ring_empty(ring)) {
index = ring->clean_tail_ptr & ring->size_mask;
mqnic_free_tx_desc(priv, ring, index, 0);
ring->clean_tail_ptr++;
cnt++;
}
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
return cnt;
return cnt;
}
int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget)
int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
int napi_budget)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring = priv->tx_ring[cq_ring->ring_index];
struct mqnic_tx_info *tx_info;
struct mqnic_cpl *cpl;
u32 cq_index;
u32 cq_tail_ptr;
u32 ring_index;
u32 ring_clean_tail_ptr;
u32 packets = 0;
u32 bytes = 0;
int done = 0;
int budget = napi_budget;
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring = priv->tx_ring[cq_ring->ring_index];
struct mqnic_tx_info *tx_info;
struct mqnic_cpl *cpl;
u32 cq_index;
u32 cq_tail_ptr;
u32 ring_index;
u32 ring_clean_tail_ptr;
u32 packets = 0;
u32 bytes = 0;
int done = 0;
int budget = napi_budget;
if (unlikely(!priv->port_up))
{
return done;
}
if (unlikely(!priv->port_up))
return done;
// prefetch for BQL
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
// prefetch for BQL
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
// process completion queue
// read head pointer from NIC
mqnic_cq_read_head_ptr(cq_ring);
// process completion queue
// read head pointer from NIC
mqnic_cq_read_head_ptr(cq_ring);
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
while (cq_ring->head_ptr != cq_tail_ptr && done < budget)
{
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index*cq_ring->stride);
ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
tx_info = &ring->tx_info[ring_index];
while (cq_ring->head_ptr != cq_tail_ptr && done < budget) {
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
tx_info = &ring->tx_info[ring_index];
// TX hardware timestamp
if (unlikely(tx_info->ts_requested))
{
struct skb_shared_hwtstamps hwts;
dev_info(priv->dev, "mqnic_process_tx_cq TX TS requested");
hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
skb_tstamp_tx(tx_info->skb, &hwts);
}
// TX hardware timestamp
if (unlikely(tx_info->ts_requested)) {
struct skb_shared_hwtstamps hwts;
dev_info(priv->dev, "mqnic_process_tx_cq TX TS requested");
hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
skb_tstamp_tx(tx_info->skb, &hwts);
}
// free TX descriptor
mqnic_free_tx_desc(priv, ring, ring_index, napi_budget);
// free TX descriptor
mqnic_free_tx_desc(priv, ring, ring_index, napi_budget);
packets++;
bytes += le16_to_cpu(cpl->len);
packets++;
bytes += le16_to_cpu(cpl->len);
done++;
done++;
cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
}
cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
}
// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
// process ring
// read tail pointer from NIC
mqnic_tx_read_tail_ptr(ring);
// process ring
// read tail pointer from NIC
mqnic_tx_read_tail_ptr(ring);
ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
ring_index = ring_clean_tail_ptr & ring->size_mask;
ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
ring_index = ring_clean_tail_ptr & ring->size_mask;
while (ring_clean_tail_ptr != ring->tail_ptr) {
tx_info = &ring->tx_info[ring_index];
while (ring_clean_tail_ptr != ring->tail_ptr)
{
tx_info = &ring->tx_info[ring_index];
if (tx_info->skb)
break;
if (tx_info->skb)
break;
ring_clean_tail_ptr++;
ring_index = ring_clean_tail_ptr & ring->size_mask;
}
ring_clean_tail_ptr++;
ring_index = ring_clean_tail_ptr & ring->size_mask;
}
// update ring tail
WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);
// update ring tail
WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);
// BQL
//netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
// BQL
//netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
// wake queue if it is stopped
if (netif_tx_queue_stopped(ring->tx_queue) && !mqnic_is_tx_ring_full(ring))
netif_tx_wake_queue(ring->tx_queue);
// wake queue if it is stopped
if (netif_tx_queue_stopped(ring->tx_queue) && !mqnic_is_tx_ring_full(ring))
{
netif_tx_wake_queue(ring->tx_queue);
}
return done;
return done;
}
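Note: completion processing is two-stage. mqnic_process_tx_cq() first frees the descriptor named by each completion (clearing its skb pointer), then walks clean_tail_ptr forward only over a contiguous run of freed entries, so ring occupancy (head_ptr - clean_tail_ptr) shrinks strictly in order; the RX path does the same with rx_info->page. A toy model of that advance (not driver code):

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8
#define SIZE_MASK (RING_SIZE - 1)

/* true = descriptor still owned by an in-flight packet (skb != NULL) */
static bool in_flight[RING_SIZE];
static unsigned int head, clean_tail;

int main(void)
{
	unsigned int i;

	/* queue four packets */
	for (i = 0; i < 4; i++)
		in_flight[head++ & SIZE_MASK] = true;

	/* completions arrive for slots 0 and 1: free those descriptors */
	in_flight[0] = false;
	in_flight[1] = false;

	/* advance clean_tail only over contiguous freed entries */
	while (clean_tail != head && !in_flight[clean_tail & SIZE_MASK])
		clean_tail++;

	/* head=4 clean_tail=2, two packets still in flight */
	printf("head=%u clean_tail=%u in flight=%u\n",
	       head, clean_tail, head - clean_tail);
	return 0;
}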
void mqnic_tx_irq(struct mqnic_cq_ring *cq)
{
struct mqnic_priv *priv = netdev_priv(cq->ndev);
struct mqnic_priv *priv = netdev_priv(cq->ndev);
if (likely(priv->port_up))
{
napi_schedule_irqoff(&cq->napi);
}
else
{
mqnic_arm_cq(cq);
}
if (likely(priv->port_up))
napi_schedule_irqoff(&cq->napi);
else
mqnic_arm_cq(cq);
}
int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
{
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct net_device *ndev = cq_ring->ndev;
int done;
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct net_device *ndev = cq_ring->ndev;
int done;
done = mqnic_process_tx_cq(ndev, cq_ring, budget);
done = mqnic_process_tx_cq(ndev, cq_ring, budget);
if (done == budget)
{
return done;
}
if (done == budget)
return done;
napi_complete(napi);
napi_complete(napi);
mqnic_arm_cq(cq_ring);
mqnic_arm_cq(cq_ring);
return done;
return done;
}
static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring, struct mqnic_tx_info *tx_info, struct mqnic_desc *tx_desc, struct sk_buff *skb)
static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
struct mqnic_tx_info *tx_info,
struct mqnic_desc *tx_desc, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 i;
u32 len;
dma_addr_t dma_addr;
struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 i;
u32 len;
dma_addr_t dma_addr;
// update tx_info
tx_info->skb = skb;
tx_info->frag_count = 0;
// update tx_info
tx_info->skb = skb;
tx_info->frag_count = 0;
for (i = 0; i < shinfo->nr_frags; i++)
{
const skb_frag_t *frag = &shinfo->frags[i];
len = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
{
// mapping failed
goto map_error;
}
for (i = 0; i < shinfo->nr_frags; i++) {
const skb_frag_t *frag = &shinfo->frags[i];
len = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
// mapping failed
goto map_error;
// write descriptor
tx_desc[i+1].len = cpu_to_le32(len);
tx_desc[i+1].addr = cpu_to_le64(dma_addr);
// write descriptor
tx_desc[i + 1].len = cpu_to_le32(len);
tx_desc[i + 1].addr = cpu_to_le64(dma_addr);
// update tx_info
tx_info->frag_count = i+1;
tx_info->frags[i].len = len;
tx_info->frags[i].dma_addr = dma_addr;
}
// update tx_info
tx_info->frag_count = i + 1;
tx_info->frags[i].len = len;
tx_info->frags[i].dma_addr = dma_addr;
}
for (i = tx_info->frag_count; i < ring->desc_block_size-1; i++)
{
tx_desc[i+1].len = 0;
tx_desc[i+1].addr = 0;
}
for (i = tx_info->frag_count; i < ring->desc_block_size - 1; i++) {
tx_desc[i + 1].len = 0;
tx_desc[i + 1].addr = 0;
}
// map skb
len = skb_headlen(skb);
dma_addr = dma_map_single(priv->dev, skb->data, len, PCI_DMA_TODEVICE);
// map skb
len = skb_headlen(skb);
dma_addr = dma_map_single(priv->dev, skb->data, len, PCI_DMA_TODEVICE);
if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
{
// mapping failed
goto map_error;
}
if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
// mapping failed
goto map_error;
// write descriptor
tx_desc[0].len = cpu_to_le32(len);
tx_desc[0].addr = cpu_to_le64(dma_addr);
// write descriptor
tx_desc[0].len = cpu_to_le32(len);
tx_desc[0].addr = cpu_to_le64(dma_addr);
// update tx_info
dma_unmap_addr_set(tx_info, dma_addr, dma_addr);
dma_unmap_len_set(tx_info, len, len);
// update tx_info
dma_unmap_addr_set(tx_info, dma_addr, dma_addr);
dma_unmap_len_set(tx_info, len, len);
return true;
return true;
map_error:
dev_err(priv->dev, "mqnic_map_skb DMA mapping failed");
dev_err(priv->dev, "mqnic_map_skb DMA mapping failed");
// unmap frags
for (i = 0; i < tx_info->frag_count; i++)
{
dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr, tx_info->frags[i].len, PCI_DMA_TODEVICE);
}
// unmap frags
for (i = 0; i < tx_info->frag_count; i++)
dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
tx_info->frags[i].len, PCI_DMA_TODEVICE);
// update tx_info
tx_info->skb = NULL;
tx_info->frag_count = 0;
// update tx_info
tx_info->skb = NULL;
tx_info->frag_count = 0;
return false;
return false;
}
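Note: mqnic_map_skb() places the linear part of the skb in descriptor 0 of the block, one page fragment in each following descriptor, and zeroes any unused slots so the hardware always consumes a fixed-size block. A toy model of that layout (the two-field descriptor struct and the addresses are assumptions for the sketch):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DESC_BLOCK_SIZE 4	/* descriptors per block, assumed for the example */

struct desc {			/* simplified: real descriptors carry more fields */
	uint32_t len;
	uint64_t addr;
};

int main(void)
{
	struct desc blk[DESC_BLOCK_SIZE];
	uint32_t frag_len[2] = {1024, 512};	/* two page fragments */
	unsigned int i, nfrags = 2;

	memset(blk, 0, sizeof(blk));

	/* descriptor 0: linear (header) part of the packet */
	blk[0].len = 128;
	blk[0].addr = 0x1000;

	/* descriptors 1..n: one per fragment; remaining slots stay zeroed */
	for (i = 0; i < nfrags; i++) {
		blk[i + 1].len = frag_len[i];
		blk[i + 1].addr = 0x2000 + 0x1000 * i;
	}

	for (i = 0; i < DESC_BLOCK_SIZE; i++)
		printf("desc[%u]: len=%u addr=0x%llx\n", i, blk[i].len,
		       (unsigned long long)blk[i].addr);
	return 0;
}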
netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring;
struct mqnic_tx_info *tx_info;
struct mqnic_desc *tx_desc;
int ring_index;
u32 index;
bool stop_queue;
u32 clean_tail_ptr;
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring;
struct mqnic_tx_info *tx_info;
struct mqnic_desc *tx_desc;
int ring_index;
u32 index;
bool stop_queue;
u32 clean_tail_ptr;
if (unlikely(!priv->port_up))
{
goto tx_drop;
}
if (unlikely(!priv->port_up))
goto tx_drop;
ring_index = skb_get_queue_mapping(skb);
ring_index = skb_get_queue_mapping(skb);
if (unlikely(ring_index >= priv->tx_queue_count))
{
// queue mapping out of range
goto tx_drop;
}
if (unlikely(ring_index >= priv->tx_queue_count))
// queue mapping out of range
goto tx_drop;
ring = priv->tx_ring[ring_index];
ring = priv->tx_ring[ring_index];
clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
// prefetch for BQL
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
// prefetch for BQL
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
index = ring->head_ptr & ring->size_mask;
index = ring->head_ptr & ring->size_mask;
tx_desc = (struct mqnic_desc *)(ring->buf + index*ring->stride);
tx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);
tx_info = &ring->tx_info[index];
tx_info = &ring->tx_info[index];
// TX hardware timestamp
tx_info->ts_requested = 0;
if (unlikely(priv->if_features & MQNIC_IF_FEATURE_PTP_TS && shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
dev_info(priv->dev, "mqnic_start_xmit TX TS requested");
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
tx_info->ts_requested = 1;
}
// TX hardware timestamp
tx_info->ts_requested = 0;
if (unlikely(priv->if_features & MQNIC_IF_FEATURE_PTP_TS && shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
dev_info(priv->dev, "mqnic_start_xmit TX TS requested");
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
tx_info->ts_requested = 1;
}
// TX hardware checksum
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned int csum_start = skb_checksum_start_offset(skb);
unsigned int csum_offset = skb->csum_offset;
// TX hardware checksum
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned int csum_start = skb_checksum_start_offset(skb);
unsigned int csum_offset = skb->csum_offset;
if (csum_start > 255 || csum_offset > 127)
{
dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d", csum_start, csum_offset);
if (csum_start > 255 || csum_offset > 127) {
dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d",
csum_start, csum_offset);
// offset out of range, fall back on software checksum
if (skb_checksum_help(skb))
{
// software checksumming failed
goto tx_drop_count;
}
tx_desc->tx_csum_cmd = 0;
}
else
{
tx_desc->tx_csum_cmd = cpu_to_le16(0x8000 | (csum_offset << 8) | (csum_start));
}
}
else
{
tx_desc->tx_csum_cmd = 0;
}
// offset out of range, fall back on software checksum
if (skb_checksum_help(skb)) {
// software checksumming failed
goto tx_drop_count;
}
tx_desc->tx_csum_cmd = 0;
} else {
tx_desc->tx_csum_cmd = cpu_to_le16(0x8000 | (csum_offset << 8) | (csum_start));
}
} else {
tx_desc->tx_csum_cmd = 0;
}
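Note: the checksum command packs the enable bit, csum_offset and csum_start into one 16-bit field (0x8000 | offset << 8 | start), which is why a start above 255 or an offset above 127 falls back to software checksumming. A small packing example (field layout inferred from the expression above):

#include <stdio.h>
#include <stdint.h>

static uint16_t tx_csum_cmd(unsigned int start, unsigned int offset)
{
	/* start occupies bits 0-7, offset bits 8-14, bit 15 enables offload */
	return 0x8000 | (offset << 8) | start;
}

int main(void)
{
	/* e.g. TCP over IPv4: checksum starts at the TCP header (34 bytes
	   into the frame); the checksum field sits 16 bytes into that header */
	unsigned int csum_start = 34, csum_offset = 16;

	if (csum_start > 255 || csum_offset > 127)
		printf("fall back to software checksum\n");
	else
		printf("tx_csum_cmd = 0x%04x\n", tx_csum_cmd(csum_start, csum_offset));
	return 0;
}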
if (shinfo->nr_frags > ring->desc_block_size-1 || (skb->data_len && skb->data_len < 32))
{
// too many frags or very short data portion; linearize
if (skb_linearize(skb))
{
goto tx_drop_count;
}
}
if (shinfo->nr_frags > ring->desc_block_size - 1 || (skb->data_len && skb->data_len < 32)) {
// too many frags or very short data portion; linearize
if (skb_linearize(skb))
goto tx_drop_count;
}
// map skb
if (!mqnic_map_skb(priv, ring, tx_info, tx_desc, skb))
{
// map failed
goto tx_drop_count;
}
// map skb
if (!mqnic_map_skb(priv, ring, tx_info, tx_desc, skb))
// map failed
goto tx_drop_count;
// count packet
ring->packets++;
ring->bytes += skb->len;
// count packet
ring->packets++;
ring->bytes += skb->len;
// enqueue
ring->head_ptr++;
// enqueue
ring->head_ptr++;
skb_tx_timestamp(skb);
skb_tx_timestamp(skb);
stop_queue = mqnic_is_tx_ring_full(ring);
if (unlikely(stop_queue))
{
dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d", ring_index, priv->port);
netif_tx_stop_queue(ring->tx_queue);
}
stop_queue = mqnic_is_tx_ring_full(ring);
if (unlikely(stop_queue)) {
dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d",
ring_index, priv->port);
netif_tx_stop_queue(ring->tx_queue);
}
// BQL
//netdev_tx_sent_queue(ring->tx_queue, tx_info->len);
//__netdev_tx_sent_queue(ring->tx_queue, tx_info->len, skb->xmit_more);
// BQL
//netdev_tx_sent_queue(ring->tx_queue, tx_info->len);
//__netdev_tx_sent_queue(ring->tx_queue, tx_info->len, skb->xmit_more);
// enqueue on NIC
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
if (unlikely(!netdev_xmit_more() || stop_queue))
// enqueue on NIC
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
if (unlikely(!netdev_xmit_more() || stop_queue)) {
#else
if (unlikely(!skb->xmit_more || stop_queue))
if (unlikely(!skb->xmit_more || stop_queue)) {
#endif
{
dma_wmb();
mqnic_tx_write_head_ptr(ring);
}
dma_wmb();
mqnic_tx_write_head_ptr(ring);
}
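Note: the head-pointer doorbell is written only when the stack signals that no further packets are queued behind this one (netdev_xmit_more(), or skb->xmit_more on pre-5.2 kernels) or when the ring just filled, so a burst of packets costs a single MMIO write; dma_wmb() orders the descriptor stores before that doorbell. A toy model of the batching decision (not driver code):

#include <stdio.h>
#include <stdbool.h>

static unsigned int head, doorbells;

/* enqueue one packet; ring the doorbell only at the end of a burst */
static void xmit(bool more, bool stop_queue)
{
	head++;				/* descriptor already written */
	if (!more || stop_queue) {
		/* dma_wmb() would go here: descriptors before doorbell */
		doorbells++;
	}
}

int main(void)
{
	xmit(true, false);		/* more packets coming: no MMIO */
	xmit(true, false);
	xmit(false, false);		/* end of burst: one doorbell */
	printf("packets=%u doorbells=%u\n", head, doorbells);
	return 0;
}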
// check if queue restarted
if (unlikely(stop_queue))
{
smp_rmb();
// check if queue restarted
if (unlikely(stop_queue)) {
smp_rmb();
clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
if (unlikely(!mqnic_is_tx_ring_full(ring)))
{
netif_tx_wake_queue(ring->tx_queue);
}
}
if (unlikely(!mqnic_is_tx_ring_full(ring)))
netif_tx_wake_queue(ring->tx_queue);
}
return NETDEV_TX_OK;
return NETDEV_TX_OK;
tx_drop_count:
ring->dropped_packets++;
ring->dropped_packets++;
tx_drop:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}