mirror of https://github.com/corundum/corundum.git synced 2025-02-06 08:38:23 +08:00

Fix kernel module coding style

Alex Forencich 2021-10-08 18:31:53 -07:00
parent 1bce5827c9
commit 5b49f09baa
15 changed files with 2696 additions and 2921 deletions

@@ -1,6 +1,6 @@
 /*
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -53,267 +53,266 @@ either expressed or implied, of The Regents of the University of California.
 struct mqnic_dev;
 
 struct mqnic_board_ops {
 	int (*init)(struct mqnic_dev *mqnic);
 	void (*deinit)(struct mqnic_dev *mqnic);
 };
 
-struct mqnic_i2c_bus
-{
+struct mqnic_i2c_bus {
 	struct mqnic_dev *mqnic;
 	u8 __iomem *scl_in_reg;
 	u8 __iomem *scl_out_reg;
 	u8 __iomem *sda_in_reg;
 	u8 __iomem *sda_out_reg;
 	uint32_t scl_in_mask;
 	uint32_t scl_out_mask;
 	uint32_t sda_in_mask;
 	uint32_t sda_out_mask;
 	struct list_head head;
 	struct i2c_algo_bit_data algo;
 	struct i2c_adapter adapter;
 };
 
 struct mqnic_dev {
 	struct device *dev;
 	struct pci_dev *pdev;
 	resource_size_t hw_regs_size;
 	phys_addr_t hw_regs_phys;
 	u8 __iomem *hw_addr;
 	u8 __iomem *phc_hw_addr;
 	resource_size_t app_hw_regs_size;
 	phys_addr_t app_hw_regs_phys;
 	u8 __iomem *app_hw_addr;
 	resource_size_t ram_hw_regs_size;
 	phys_addr_t ram_hw_regs_phys;
 	u8 __iomem *ram_hw_addr;
 	struct mutex state_lock;
 	int mac_count;
 	u8 mac_list[MQNIC_MAX_IF][ETH_ALEN];
 	char name[16];
 	int irq_count;
 	int irq_map[32];
 	unsigned int id;
 	struct list_head dev_list_node;
 	struct miscdevice misc_dev;
 	u32 fw_id;
 	u32 fw_ver;
 	u32 board_id;
 	u32 board_ver;
 	u32 phc_count;
 	u32 phc_offset;
 	u32 if_count;
 	u32 if_stride;
 	u32 if_csr_offset;
 	struct net_device *ndev[MQNIC_MAX_IF];
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_clock_info;
 	struct mqnic_board_ops *board_ops;
 	struct list_head i2c_bus;
 	int i2c_adapter_count;
 	int mod_i2c_client_count;
 	struct i2c_client *mod_i2c_client[MQNIC_MAX_IF];
 	struct i2c_client *eeprom_i2c_client;
 };
 
 struct mqnic_frag {
 	dma_addr_t dma_addr;
 	u32 len;
 };
 
 struct mqnic_tx_info {
 	struct sk_buff *skb;
 	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 frag_count;
-	struct mqnic_frag frags[MQNIC_MAX_FRAGS-1];
+	struct mqnic_frag frags[MQNIC_MAX_FRAGS - 1];
 	int ts_requested;
 };
 
 struct mqnic_rx_info {
 	struct page *page;
 	u32 page_order;
 	u32 page_offset;
 	dma_addr_t dma_addr;
 	u32 len;
 };
 
 struct mqnic_ring {
 	// written on enqueue (i.e. start_xmit)
 	u32 head_ptr;
 	u64 bytes;
 	u64 packets;
 	u64 dropped_packets;
 	struct netdev_queue *tx_queue;
 
 	// written from completion
 	u32 tail_ptr ____cacheline_aligned_in_smp;
 	u32 clean_tail_ptr;
 	u64 ts_s;
 	u8 ts_valid;
 
 	// mostly constant
 	u32 size;
 	u32 full_size;
 	u32 size_mask;
 	u32 stride;
 	u32 cpl_index;
 	u32 mtu;
 	u32 page_order;
 	u32 desc_block_size;
 	u32 log_desc_block_size;
 	size_t buf_size;
 	u8 *buf;
 	dma_addr_t buf_dma_addr;
 	union {
 		struct mqnic_tx_info *tx_info;
 		struct mqnic_rx_info *rx_info;
 	};
 	u32 hw_ptr_mask;
 	u8 __iomem *hw_addr;
 	u8 __iomem *hw_head_ptr;
 	u8 __iomem *hw_tail_ptr;
 } ____cacheline_aligned_in_smp;
 
 struct mqnic_cq_ring {
 	u32 head_ptr;
 	u32 tail_ptr;
 	u32 size;
 	u32 size_mask;
 	u32 stride;
 	size_t buf_size;
 	u8 *buf;
 	dma_addr_t buf_dma_addr;
 	struct net_device *ndev;
 	struct napi_struct napi;
 	int ring_index;
 	int eq_index;
-	void (*handler) (struct mqnic_cq_ring *);
+	void (*handler)(struct mqnic_cq_ring *);
 	u32 hw_ptr_mask;
 	u8 __iomem *hw_addr;
 	u8 __iomem *hw_head_ptr;
 	u8 __iomem *hw_tail_ptr;
 };
 
 struct mqnic_eq_ring {
 	u32 head_ptr;
 	u32 tail_ptr;
 	u32 size;
 	u32 size_mask;
 	u32 stride;
 	size_t buf_size;
 	u8 *buf;
 	dma_addr_t buf_dma_addr;
 	struct net_device *ndev;
 	int int_index;
 	int irq;
-	void (*handler) (struct mqnic_eq_ring *);
+	void (*handler)(struct mqnic_eq_ring *);
 	u32 hw_ptr_mask;
 	u8 __iomem *hw_addr;
 	u8 __iomem *hw_head_ptr;
 	u8 __iomem *hw_tail_ptr;
 };
 
 struct mqnic_port {
 	struct device *dev;
 	struct net_device *ndev;
 	int index;
 	u32 tx_queue_count;
 	u32 port_id;
 	u32 port_features;
 	u32 port_mtu;
 	u32 sched_count;
 	u32 sched_offset;
 	u32 sched_stride;
 	u32 sched_type;
 	u8 __iomem *hw_addr;
 };
 
 struct mqnic_priv {
 	struct device *dev;
 	struct net_device *ndev;
 	struct mqnic_dev *mdev;
 	spinlock_t stats_lock;
 	bool registered;
 	int port;
 	bool port_up;
 	u32 if_id;
 	u32 if_features;
 	u32 event_queue_count;
 	u32 event_queue_offset;
 	u32 tx_queue_count;
 	u32 tx_queue_offset;
 	u32 tx_cpl_queue_count;
 	u32 tx_cpl_queue_offset;
 	u32 rx_queue_count;
 	u32 rx_queue_offset;
 	u32 rx_cpl_queue_count;
 	u32 rx_cpl_queue_offset;
 	u32 port_count;
 	u32 port_offset;
 	u32 port_stride;
 	u32 max_desc_block_size;
 	u8 __iomem *hw_addr;
 	u8 __iomem *csr_hw_addr;
 	struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS];
 	struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS];
 	struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS];
 	struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS];
 	struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];
 	struct mqnic_port *ports[MQNIC_MAX_PORTS];
 	struct hwtstamp_config hwts_config;
 	struct i2c_client *mod_i2c_client;
 };
 
 // mqnic_main.c
@@ -327,7 +326,8 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr);
 void mqnic_destroy_netdev(struct net_device *ndev);
 
 // mqnic_port.c
-int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, int index, u8 __iomem *hw_addr);
+int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
+		int index, u8 __iomem *hw_addr);
 void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr);
 int mqnic_activate_port(struct mqnic_port *port);
 void mqnic_deactivate_port(struct mqnic_port *port);
@@ -341,7 +341,8 @@ void mqnic_port_set_rx_mtu(struct mqnic_port *port, u32 mtu);
 // mqnic_ptp.c
 void mqnic_register_phc(struct mqnic_dev *mdev);
 void mqnic_unregister_phc(struct mqnic_dev *mdev);
-ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring, const struct mqnic_cpl *cpl);
+ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
+		const struct mqnic_cpl *cpl);
 
 // mqnic_i2c.c
 struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *reg);
@@ -356,9 +357,11 @@ int mqnic_board_init(struct mqnic_dev *mqnic);
 void mqnic_board_deinit(struct mqnic_dev *mqnic);
 
 // mqnic_eq.c
-int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr);
-int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index);
+int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
+		int int_index);
 void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring);
@@ -368,9 +371,11 @@ void mqnic_arm_eq(struct mqnic_eq_ring *ring);
 void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring);
 
 // mqnic_cq.c
-int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr);
-int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int eq_index);
+int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
+		int eq_index);
 void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring);
@@ -379,35 +384,44 @@ void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring);
 void mqnic_arm_cq(struct mqnic_cq_ring *ring);
 
 // mqnic_tx.c
-int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
-int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
+int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int cpl_index);
 void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring);
 void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_tx_write_head_ptr(struct mqnic_ring *ring);
-void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index, int napi_budget);
+void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int index, int napi_budget);
 int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
+int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
+		int napi_budget);
 void mqnic_tx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_tx_cq(struct napi_struct *napi, int budget);
 netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);
 
 // mqnic_rx.c
-int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
-int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
+int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int cpl_index);
 void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring);
 void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_rx_write_head_ptr(struct mqnic_ring *ring);
-void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
+void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int index);
 int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
+int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int index);
 void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
+int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
+		int napi_budget);
 void mqnic_rx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_rx_cq(struct napi_struct *napi, int budget);
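The prototypes above imply one uniform lifecycle for all four ring types: create, activate (bind to a completion or event sink), use, deactivate, destroy. A condensed, hypothetical sketch for a single completion ring follows; the entry count and wiring are invented for illustration, and the 32-byte stride is simply sizeof(struct mqnic_cpl) as defined in mqnic_hw.h further down:

#include "mqnic.h"

// illustrative only: one CQ's lifecycle as the prototypes above suggest
static int example_cq_lifecycle(struct mqnic_priv *priv, int index,
		int eq_index, u8 __iomem *hw_addr)
{
	struct mqnic_cq_ring *cq_ring;
	int ret;

	ret = mqnic_create_cq_ring(priv, &cq_ring, 1024 /* entries */,
			32 /* stride = sizeof(struct mqnic_cpl) */, index, hw_addr);
	if (ret)
		return ret;

	ret = mqnic_activate_cq_ring(priv, cq_ring, eq_index); // bind CQ to an EQ
	if (ret)
		goto out;

	mqnic_arm_cq(cq_ring); // let the bound EQ deliver completion events

	// ... NAPI polling consumes completions while the port is up ...

	mqnic_deactivate_cq_ring(priv, cq_ring);
out:
	mqnic_destroy_cq_ring(priv, &cq_ring);
	return ret;
}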

File diff suppressed because it is too large

@@ -1,6 +1,6 @@
 /*
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -33,126 +33,127 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 
-int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
+int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_cq_ring *ring;
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (!ring)
-	{
+	if (!ring) {
 		dev_err(dev, "Failed to allocate CQ ring");
 		return -ENOMEM;
 	}
 
 	ring->ndev = priv->ndev;
 
 	ring->size = roundup_pow_of_two(size);
-	ring->size_mask = ring->size-1;
+	ring->size_mask = ring->size - 1;
 	ring->stride = roundup_pow_of_two(stride);
 
-	ring->buf_size = ring->size*ring->stride;
-	ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
-	if (!ring->buf)
-	{
+	ring->buf_size = ring->size * ring->stride;
+	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
+			&ring->buf_dma_addr, GFP_KERNEL);
+	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate CQ ring DMA buffer");
 		ret = -ENOMEM;
 		goto fail_ring;
 	}
 
 	ring->hw_addr = hw_addr;
 	ring->hw_ptr_mask = 0xffff;
-	ring->hw_head_ptr = hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG;
-	ring->hw_tail_ptr = hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG;
+	ring->hw_head_ptr = hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG;
+	ring->hw_tail_ptr = hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG;
 
 	ring->head_ptr = 0;
 	ring->tail_ptr = 0;
 
 	// deactivate queue
-	iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
 	// set base address
-	iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+0);
-	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+4);
+	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 0);
+	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 4);
 	// set interrupt index
-	iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
 	// set pointers
-	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG);
-	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG);
+	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG);
+	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
 	// set size
-	iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	*ring_ptr = ring;
 	return 0;
 
 fail_ring:
 	kfree(ring);
 	*ring_ptr = NULL;
 	return ret;
 }
 
 void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_cq_ring *ring = *ring_ptr;
 	*ring_ptr = NULL;
 
 	mqnic_deactivate_cq_ring(priv, ring);
 
 	dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
 
 	kfree(ring);
 }
 
 int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int eq_index)
 {
 	ring->eq_index = eq_index;
 
 	// deactivate queue
-	iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(0, ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
 	// set base address
-	iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+0);
-	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+4);
+	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 0);
+	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_CPL_QUEUE_BASE_ADDR_REG + 4);
 	// set interrupt index
-	iowrite32(eq_index, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(eq_index, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
 	// set pointers
-	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG);
-	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG);
+	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG);
+	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
-	iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK,
+			ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	return 0;
 }
 
 void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring)
 {
 	// deactivate queue
-	iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
 	// disarm queue
-	iowrite32(ring->eq_index, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(ring->eq_index, ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
 }
 
 bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring)
 {
 	return ring->head_ptr == ring->tail_ptr;
 }
 
 bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring)
 {
 	return ring->head_ptr - ring->tail_ptr >= ring->size;
 }
 
 void mqnic_cq_read_head_ptr(struct mqnic_cq_ring *ring)
 {
 	ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
 }
 
 void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring)
 {
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
 }
 
 void mqnic_arm_cq(struct mqnic_cq_ring *ring)
 {
-	iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK,
+			ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
 }
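A note on the pointer arithmetic above: head_ptr and tail_ptr are free-running u32 counters that are masked only when exchanged with hardware, so the empty/full tests stay correct across 32-bit wraparound. A tiny standalone check (illustrative values, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t size = 1024;            // ring entries (power of two)
	uint32_t tail = UINT32_MAX - 10; // consumer lags the producer
	uint32_t head = UINT32_MAX;

	head += 5; // producer index wraps past zero

	assert(head - tail == 15);  // unsigned subtraction gives true occupancy
	assert(head - tail < size); // ring is not full
	return 0;
}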

@@ -1,6 +1,6 @@
 /*
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -38,100 +38,89 @@ either expressed or implied, of The Regents of the University of California.
 static int mqnic_open(struct inode *inode, struct file *file)
 {
 	// struct miscdevice *miscdev = file->private_data;
 	// struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
 
 	return 0;
 }
 
 static int mqnic_release(struct inode *inode, struct file *file)
 {
 	// struct miscdevice *miscdev = file->private_data;
 	// struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
 
 	return 0;
 }
 
 static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *vma)
 {
 	size_t map_size = vma->vm_end - vma->vm_start;
 	int ret;
 
-	if (map_size > mqnic->hw_regs_size)
-	{
-		dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)", vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
+	if (map_size > mqnic->hw_regs_size) {
+		dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)",
+				vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
 		return -EINVAL;
 	}
 
-	ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT, map_size, pgprot_noncached(vma->vm_page_prot));
+	ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT,
+			map_size, pgprot_noncached(vma->vm_page_prot));
 	if (ret)
-	{
 		dev_err(mqnic->dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
-	}
 	else
-	{
-		dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p", &mqnic->hw_regs_phys, (void *)vma->vm_start);
-	}
+		dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p",
+				&mqnic->hw_regs_phys, (void *)vma->vm_start);
 
 	return ret;
 }
 
 static int mqnic_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct miscdevice *miscdev = file->private_data;
 	struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
-	int ret;
 
 	if (vma->vm_pgoff == 0)
-	{
-		ret = mqnic_map_registers(mqnic, vma);
-	}
-	else
-	{
-		goto fail_invalid_offset;
-	}
-
-	return ret;
+		return mqnic_map_registers(mqnic, vma);
 
-fail_invalid_offset:
-	dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu", vma->vm_pgoff);
+	dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu",
+			vma->vm_pgoff);
 	return -EINVAL;
 }
 
 static long mqnic_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct miscdevice *miscdev = file->private_data;
 	struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
 
 	if (_IOC_TYPE(cmd) != MQNIC_IOCTL_TYPE)
 		return -ENOTTY;
 
 	switch (cmd) {
 	case MQNIC_IOCTL_INFO:
 	{
 		struct mqnic_ioctl_info ctl;
 
 		ctl.fw_id = mqnic->fw_id;
 		ctl.fw_ver = mqnic->fw_ver;
 		ctl.board_id = mqnic->board_id;
 		ctl.board_ver = mqnic->board_ver;
 		ctl.regs_size = mqnic->hw_regs_size;
 
 		if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl)) != 0)
 			return -EFAULT;
 
 		return 0;
 	}
 	default:
 		return -ENOTTY;
 	}
 }
 
 const struct file_operations mqnic_fops = {
 	.owner = THIS_MODULE,
 	.open = mqnic_open,
 	.release = mqnic_release,
 	.mmap = mqnic_mmap,
 	.unlocked_ioctl = mqnic_ioctl,
 };
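For orientation, a minimal userspace consumer of this character device might look as follows. The node name /dev/mqnic0 is an assumption (it follows from the miscdevice name the driver registers), and mqnic_ioctl.h is the header shown later in this commit:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "mqnic_ioctl.h"

int main(void)
{
	struct mqnic_ioctl_info info;
	void *regs;
	int fd = open("/dev/mqnic0", O_RDWR); // assumed device node name

	if (fd < 0)
		return 1;

	// MQNIC_IOCTL_INFO reports FW/board IDs and the register region size
	if (ioctl(fd, MQNIC_IOCTL_INFO, &info) != 0)
		return 1;
	printf("fw %u.%u, regs %zu bytes\n",
	       info.fw_ver >> 16, info.fw_ver & 0xffff, info.regs_size);

	// page offset 0 maps the NIC control registers (see mqnic_mmap above)
	regs = mmap(NULL, info.regs_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (regs != MAP_FAILED)
		munmap(regs, info.regs_size);

	close(fd);
	return 0;
}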

@@ -1,6 +1,6 @@
 /*
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -33,200 +33,199 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 
-int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
+int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_eq_ring *ring;
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (!ring)
-	{
+	if (!ring) {
 		dev_err(dev, "Failed to allocate EQ ring");
 		return -ENOMEM;
 	}
 
 	ring->ndev = priv->ndev;
 
 	ring->size = roundup_pow_of_two(size);
-	ring->size_mask = ring->size-1;
+	ring->size_mask = ring->size - 1;
 	ring->stride = roundup_pow_of_two(stride);
 
-	ring->buf_size = ring->size*ring->stride;
-	ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
-	if (!ring->buf)
-	{
+	ring->buf_size = ring->size * ring->stride;
+	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
+			&ring->buf_dma_addr, GFP_KERNEL);
+	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate EQ ring DMA buffer");
 		ret = -ENOMEM;
 		goto fail_ring;
 	}
 
 	ring->hw_addr = hw_addr;
 	ring->hw_ptr_mask = 0xffff;
-	ring->hw_head_ptr = hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG;
-	ring->hw_tail_ptr = hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG;
+	ring->hw_head_ptr = hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG;
+	ring->hw_tail_ptr = hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG;
 
 	ring->head_ptr = 0;
 	ring->tail_ptr = 0;
 
 	// deactivate queue
-	iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 	// set base address
-	iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+0);
-	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4);
+	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
+	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
 	// set interrupt index
-	iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 	// set pointers
-	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
-	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
+	iowrite32(ring->head_ptr & ring->hw_ptr_mask,
+			ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
+	iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
+			ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
 	// set size
-	iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	*ring_ptr = ring;
 	return 0;
 
 fail_ring:
 	kfree(ring);
 	*ring_ptr = NULL;
 	return ret;
 }
 
 void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_eq_ring *ring = *ring_ptr;
 	*ring_ptr = NULL;
 
 	mqnic_deactivate_eq_ring(priv, ring);
 
 	dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
 
 	kfree(ring);
 }
 
-int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index)
+int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
+		int int_index)
 {
 	ring->int_index = int_index;
 
 	// deactivate queue
-	iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 	// set base address
-	iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+0);
-	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4);
+	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 0);
+	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_EVENT_QUEUE_BASE_ADDR_REG + 4);
 	// set interrupt index
-	iowrite32(int_index, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 	// set pointers
-	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
-	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
+	iowrite32(ring->head_ptr & ring->hw_ptr_mask,
+			ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
+	iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
+			ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
-	iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK,
+			ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	return 0;
 }
 
 void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring)
 {
 	// deactivate queue
-	iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 	// disarm queue
-	iowrite32(ring->int_index, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(ring->int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 }
 
 bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring)
 {
 	return ring->head_ptr == ring->tail_ptr;
 }
 
 bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring)
 {
 	return ring->head_ptr - ring->tail_ptr >= ring->size;
 }
 
 void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring)
 {
 	ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
 }
 
 void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring)
 {
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
 }
 
 void mqnic_arm_eq(struct mqnic_eq_ring *ring)
 {
-	iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
+	iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK,
+			ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 }
 
 void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_event *event;
 	u32 eq_index;
 	u32 eq_tail_ptr;
 	int done = 0;
 
 	if (unlikely(!priv->port_up))
-	{
 		return;
-	}
 
 	// read head pointer from NIC
 	mqnic_eq_read_head_ptr(eq_ring);
 
 	eq_tail_ptr = eq_ring->tail_ptr;
 	eq_index = eq_tail_ptr & eq_ring->size_mask;
 
-	while (eq_ring->head_ptr != eq_tail_ptr)
-	{
-		event = (struct mqnic_event *)(eq_ring->buf + eq_index*eq_ring->stride);
+	while (eq_ring->head_ptr != eq_tail_ptr) {
+		event = (struct mqnic_event *)(eq_ring->buf + eq_index * eq_ring->stride);
 
-		if (event->type == MQNIC_EVENT_TYPE_TX_CPL)
-		{
+		if (event->type == MQNIC_EVENT_TYPE_TX_CPL) {
 			// transmit completion event
-			if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count))
-			{
-				dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type));
-				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
-			}
-			else
-			{
-				struct mqnic_cq_ring *cq_ring = priv->tx_cpl_ring[le16_to_cpu(event->source)];
-				if (likely(cq_ring && cq_ring->handler))
-				{
-					cq_ring->handler(cq_ring);
-				}
-			}
-		}
-		else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL)
-		{
+			if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) {
+				dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
+						priv->port, le16_to_cpu(event->source), eq_index,
+						le16_to_cpu(event->type));
+				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
+						event, MQNIC_EVENT_SIZE, true);
+			} else {
+				struct mqnic_cq_ring *cq_ring =
+						priv->tx_cpl_ring[le16_to_cpu(event->source)];
+				if (likely(cq_ring && cq_ring->handler))
+					cq_ring->handler(cq_ring);
+			}
+		} else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) {
 			// receive completion event
-			if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count))
-			{
-				dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type));
-				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
-			}
-			else
-			{
-				struct mqnic_cq_ring *cq_ring = priv->rx_cpl_ring[le16_to_cpu(event->source)];
-				if (likely(cq_ring && cq_ring->handler))
-				{
-					cq_ring->handler(cq_ring);
-				}
-			}
-		}
-		else
-		{
-			dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)", priv->port, le16_to_cpu(event->type), eq_index, le16_to_cpu(event->source));
-			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
-		}
+			if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) {
+				dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
+						priv->port, le16_to_cpu(event->source), eq_index,
+						le16_to_cpu(event->type));
+				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
+						event, MQNIC_EVENT_SIZE, true);
+			} else {
+				struct mqnic_cq_ring *cq_ring =
+						priv->rx_cpl_ring[le16_to_cpu(event->source)];
+				if (likely(cq_ring && cq_ring->handler))
+					cq_ring->handler(cq_ring);
+			}
+		} else {
+			dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)",
+					priv->port, le16_to_cpu(event->type), eq_index,
+					le16_to_cpu(event->source));
+			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
+					event, MQNIC_EVENT_SIZE, true);
+		}
 
 		done++;
 
 		eq_tail_ptr++;
 		eq_index = eq_tail_ptr & eq_ring->size_mask;
 	}
 
 	// update eq tail
 	eq_ring->tail_ptr = eq_tail_ptr;
 	mqnic_eq_write_tail_ptr(eq_ring);
 }
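The dispatch above is deliberately indirect: the hardware writes a small mqnic_event record, and the driver fans out through the completion ring's handler pointer. Judging by the handler prototypes (mqnic_tx_irq() and mqnic_rx_irq() each take a struct mqnic_cq_ring *), a plausible wiring when the interface is brought up, in code not shown in this excerpt, is:

// hypothetical: install the NAPI-scheduling handlers that
// mqnic_process_eq() invokes for TX/RX completion events
priv->tx_cpl_ring[i]->handler = mqnic_tx_irq;
priv->rx_cpl_ring[i]->handler = mqnic_rx_irq;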

@@ -1,6 +1,6 @@
 /*
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -40,143 +40,136 @@ either expressed or implied, of The Regents of the University of California.
 #define SFF_MODULE_ID_QSFP_PLUS 0x0d
 #define SFF_MODULE_ID_QSFP28 0x11
 
-static void mqnic_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo)
+static void mqnic_get_drvinfo(struct net_device *ndev,
+		struct ethtool_drvinfo *drvinfo)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_dev *mdev = priv->mdev;
 
 	strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, DRIVER_VERSION, sizeof(drvinfo->version));
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d", mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d",
+			mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
 	strlcpy(drvinfo->bus_info, dev_name(mdev->dev), sizeof(drvinfo->bus_info));
 }
 
-static int mqnic_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+static int mqnic_get_ts_info(struct net_device *ndev,
+		struct ethtool_ts_info *info)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_dev *mdev = priv->mdev;
 
 	ethtool_op_get_ts_info(ndev, info);
 
 	if (mdev->ptp_clock)
 		info->phc_index = ptp_clock_index(mdev->ptp_clock);
 
 	if (!(priv->if_features & MQNIC_IF_FEATURE_PTP_TS) || !mdev->ptp_clock)
 		return 0;
 
-	info->so_timestamping =
-		SOF_TIMESTAMPING_TX_HARDWARE |
-		SOF_TIMESTAMPING_RX_HARDWARE |
-		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
 
-	info->tx_types =
-		BIT(HWTSTAMP_TX_OFF) |
-		BIT(HWTSTAMP_TX_ON);
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
 
-	info->rx_filters =
-		BIT(HWTSTAMP_FILTER_NONE) |
-		BIT(HWTSTAMP_FILTER_ALL);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
 
 	return 0;
 }
 
-static int mqnic_read_module_eeprom(struct net_device *ndev, u16 offset, u16 len, u8 *data)
+static int mqnic_read_module_eeprom(struct net_device *ndev,
+		u16 offset, u16 len, u8 * data)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 
 	if (!priv->mod_i2c_client)
-	{
 		return -1;
-	}
 
 	if (len > I2C_SMBUS_BLOCK_MAX)
 		len = I2C_SMBUS_BLOCK_MAX;
 
 	return i2c_smbus_read_i2c_block_data(priv->mod_i2c_client, offset, len, data);
 }
 
-static int mqnic_get_module_info(struct net_device *ndev, struct ethtool_modinfo *modinfo)
+static int mqnic_get_module_info(struct net_device *ndev,
+		struct ethtool_modinfo *modinfo)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	int read_len = 0;
 	u8 data[16];
 
 	// read module ID and revision
 	read_len = mqnic_read_module_eeprom(ndev, 0, 2, data);
 
 	if (read_len < 2)
 		return -EIO;
 
 	// check identifier byte at address 0
 	switch (data[0]) {
 	case SFF_MODULE_ID_SFP:
 		modinfo->type = ETH_MODULE_SFF_8472;
 		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
 		break;
 	case SFF_MODULE_ID_QSFP:
 		modinfo->type = ETH_MODULE_SFF_8436;
 		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
 		break;
 	case SFF_MODULE_ID_QSFP_PLUS:
 		// check revision at address 1
-		if (data[1] >= 0x03)
-		{
+		if (data[1] >= 0x03) {
 			modinfo->type = ETH_MODULE_SFF_8636;
 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
-		}
-		else
-		{
+		} else {
 			modinfo->type = ETH_MODULE_SFF_8436;
 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
 		}
 		break;
 	case SFF_MODULE_ID_QSFP28:
 		modinfo->type = ETH_MODULE_SFF_8636;
 		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
 		break;
 	default:
 		dev_err(priv->dev, "Unknown module ID");
 		return -EINVAL;
 	}
 
 	return 0;
 }
 
-static int mqnic_get_module_eeprom(struct net_device *ndev, struct ethtool_eeprom *eeprom, u8 *data)
+static int mqnic_get_module_eeprom(struct net_device *ndev,
+		struct ethtool_eeprom *eeprom, u8 * data)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	int i = 0;
 	int read_len;
 
 	if (eeprom->len == 0)
 		return -EINVAL;
 
 	memset(data, 0, eeprom->len);
 
-	while (i < eeprom->len)
-	{
-		read_len = mqnic_read_module_eeprom(ndev, eeprom->offset+i, eeprom->len-i, data+i);
+	while (i < eeprom->len) {
+		read_len = mqnic_read_module_eeprom(ndev, eeprom->offset + i,
+				eeprom->len - i, data + i);
 
 		if (read_len == 0)
 			return -EIO;
 
-		if (read_len < 0)
-		{
+		if (read_len < 0) {
 			dev_err(priv->dev, "Failed to read module EEPROM");
 			return 0;
 		}
 
 		i += read_len;
 	}
 
 	return 0;
 }
 
 const struct ethtool_ops mqnic_ethtool_ops = {
 	.get_drvinfo = mqnic_get_drvinfo,
 	.get_ts_info = mqnic_get_ts_info,
 	.get_module_info = mqnic_get_module_info,
 	.get_module_eeprom = mqnic_get_module_eeprom,
 };

@@ -1,6 +1,6 @@
 /*
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -260,32 +260,32 @@ either expressed or implied, of The Regents of the University of California.
 #define MQNIC_EVENT_SIZE 32
 
 struct mqnic_desc {
 	__le16 rsvd0;
 	__le16 tx_csum_cmd;
 	__le32 len;
 	__le64 addr;
 };
 
 struct mqnic_cpl {
 	__le16 queue;
 	__le16 index;
 	__le16 len;
 	__le16 rsvd0;
 	__le32 ts_ns;
 	__le16 ts_s;
 	__le16 rx_csum;
 	__le32 rx_hash;
 	__u8 rx_hash_type;
 	__u8 rsvd1;
 	__u8 rsvd2;
 	__u8 rsvd3;
 	__le32 rsvd4;
 	__le32 rsvd5;
 };
 
 struct mqnic_event {
 	__le16 type;
 	__le16 source;
 };
 
 #endif /* MQNIC_HW_H */
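As a sanity check on this layout: the mqnic_cpl fields sum to exactly 32 bytes (2+2+2+2+4+2+2+4+1+1+1+1+4+4), and mqnic_event to 4, with events padded out to the 32-byte MQNIC_EVENT_SIZE by the ring stride. A hypothetical compile-time assertion, not part of the commit (recent kernels expose static_assert via <linux/build_bug.h>):

#include <linux/build_bug.h>

static_assert(sizeof(struct mqnic_cpl) == 32);  // one completion record
static_assert(sizeof(struct mqnic_event) == 4); // padded to MQNIC_EVENT_SIZE in the ring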

@ -1,6 +1,6 @@
/* /*
Copyright 2019, The Regents of the University of California. Copyright 2019-2021, The Regents of the University of California.
All rights reserved. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
@ -35,154 +35,147 @@ either expressed or implied, of The Regents of the University of California.
static void mqnic_i2c_set_scl(void *data, int state) static void mqnic_i2c_set_scl(void *data, int state)
{ {
struct mqnic_i2c_bus *bus = data; struct mqnic_i2c_bus *bus = data;
if (state) if (state)
{ iowrite32(ioread32(bus->scl_out_reg) | bus->scl_out_mask, bus->scl_out_reg);
iowrite32(ioread32(bus->scl_out_reg) | bus->scl_out_mask, bus->scl_out_reg); else
} iowrite32(ioread32(bus->scl_out_reg) & ~bus->scl_out_mask, bus->scl_out_reg);
else
{
iowrite32(ioread32(bus->scl_out_reg) & ~bus->scl_out_mask, bus->scl_out_reg);
}
} }
static void mqnic_i2c_set_sda(void *data, int state) static void mqnic_i2c_set_sda(void *data, int state)
{ {
struct mqnic_i2c_bus *bus = data; struct mqnic_i2c_bus *bus = data;
if (state) if (state)
{ iowrite32(ioread32(bus->sda_out_reg) | bus->sda_out_mask, bus->sda_out_reg);
iowrite32(ioread32(bus->sda_out_reg) | bus->sda_out_mask, bus->sda_out_reg); else
} iowrite32(ioread32(bus->sda_out_reg) & ~bus->sda_out_mask, bus->sda_out_reg);
else
{
iowrite32(ioread32(bus->sda_out_reg) & ~bus->sda_out_mask, bus->sda_out_reg);
}
} }
static int mqnic_i2c_get_scl(void *data) static int mqnic_i2c_get_scl(void *data)
{ {
struct mqnic_i2c_bus *bus = data; struct mqnic_i2c_bus *bus = data;
return !!(ioread32(bus->scl_in_reg) & bus->scl_in_mask); return !!(ioread32(bus->scl_in_reg) & bus->scl_in_mask);
} }
static int mqnic_i2c_get_sda(void *data)
{
	struct mqnic_i2c_bus *bus = data;

	return !!(ioread32(bus->sda_in_reg) & bus->sda_in_mask);
}

struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *reg)
{
	struct mqnic_i2c_bus *bus;
	struct i2c_algo_bit_data *algo;
	struct i2c_adapter *adapter;

	if (!reg)
		return NULL;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return NULL;

	// set private data
	bus->mqnic = mqnic;
	bus->scl_in_reg = reg;
	bus->scl_out_reg = reg;
	bus->sda_in_reg = reg;
	bus->sda_out_reg = reg;
	bus->scl_in_mask = MQNIC_REG_GPIO_I2C_SCL_IN;
	bus->scl_out_mask = MQNIC_REG_GPIO_I2C_SCL_OUT;
	bus->sda_in_mask = MQNIC_REG_GPIO_I2C_SDA_IN;
	bus->sda_out_mask = MQNIC_REG_GPIO_I2C_SDA_OUT;

	// bit-bang algorithm setup
	algo = &bus->algo;
	algo->udelay = 5;
	algo->timeout = usecs_to_jiffies(2000);
	algo->setsda = mqnic_i2c_set_sda;
	algo->setscl = mqnic_i2c_set_scl;
	algo->getsda = mqnic_i2c_get_sda;
	algo->getscl = mqnic_i2c_get_scl;
	algo->data = bus;

	// adapter setup
	adapter = &bus->adapter;
	adapter->owner = THIS_MODULE;
	adapter->algo_data = algo;
	adapter->dev.parent = mqnic->dev;
	snprintf(adapter->name, sizeof(adapter->name), "%s I2C%d", mqnic->name,
			mqnic->i2c_adapter_count);

	if (i2c_bit_add_bus(adapter)) {
		dev_err(mqnic->dev, "Failed to register I2C adapter");
		goto err_free_bus;
	}

	list_add_tail(&bus->head, &mqnic->i2c_bus);

	mqnic->i2c_adapter_count++;

	return bus;

err_free_bus:
	kfree(bus);
	return NULL;
}

struct i2c_adapter *mqnic_i2c_adapter_create(struct mqnic_dev *mqnic, u8 __iomem *reg)
{
	struct mqnic_i2c_bus *bus = mqnic_i2c_bus_create(mqnic, reg);

	if (!bus)
		return NULL;

	return &bus->adapter;
}

void mqnic_i2c_bus_release(struct mqnic_i2c_bus *bus)
{
	struct mqnic_dev *mqnic;

	if (!bus)
		return;

	mqnic = bus->mqnic;

	i2c_del_adapter(&bus->adapter);
	list_del(&bus->head);
	kfree(bus);

	mqnic->i2c_adapter_count--;
}

void mqnic_i2c_adapter_release(struct i2c_adapter *adapter)
{
	struct mqnic_i2c_bus *bus;

	if (!adapter)
		return;

	bus = container_of(adapter, struct mqnic_i2c_bus, adapter);
	mqnic_i2c_bus_release(bus);
}

int mqnic_i2c_init(struct mqnic_dev *mqnic)
{
	INIT_LIST_HEAD(&mqnic->i2c_bus);

	return 0;
}

void mqnic_i2c_deinit(struct mqnic_dev *mqnic)
{
	struct mqnic_i2c_bus *bus;

	while (!list_empty(&mqnic->i2c_bus)) {
		bus = list_first_entry(&mqnic->i2c_bus, typeof(*bus), head);
		mqnic_i2c_bus_release(bus);
	}
}
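For context, this is how a board-specific init routine might consume the helpers above: create a bit-banged bus on a GPIO register and hand the resulting adapter to the standard I2C core. A minimal sketch only, assuming a hypothetical register offset MQNIC_REG_GPIO_I2C_0 and a 0x50 EEPROM address; the actual offsets, client addresses, and API variants (i2c_new_dummy_device needs kernel >= 5.2) are board- and kernel-dependent.

// Hypothetical usage sketch, not the driver's actual board init:
// MQNIC_REG_GPIO_I2C_0 and the 0x50 address are assumptions.
static int example_board_init(struct mqnic_dev *mqnic)
{
	struct mqnic_i2c_bus *bus;

	bus = mqnic_i2c_bus_create(mqnic, mqnic->hw_addr + MQNIC_REG_GPIO_I2C_0);
	if (!bus)
		return -ENOMEM;

	// the adapter is now registered; attach a client through the I2C core
	mqnic->eeprom_i2c_client = i2c_new_dummy_device(&bus->adapter, 0x50);
	if (IS_ERR(mqnic->eeprom_i2c_client)) {
		mqnic->eeprom_i2c_client = NULL;
		return -ENODEV;
	}

	return 0;
}

The matching teardown is already handled centrally: mqnic_i2c_deinit() walks the device's bus list and releases every adapter created this way.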


@@ -1,6 +1,6 @@
/*
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,11 +41,11 @@ either expressed or implied, of The Regents of the University of California.
#define MQNIC_IOCTL_INFO _IOR(MQNIC_IOCTL_TYPE, 0xf0, struct mqnic_ioctl_info)

struct mqnic_ioctl_info {
	__u32 fw_id;
	__u32 fw_ver;
	__u32 board_id;
	__u32 board_ver;
	size_t regs_size;
};

#endif /* MQNIC_IOCTL_H */
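This ioctl is issued against the misc device the driver registers (/dev/mqnicN). A minimal userspace sketch, assuming /dev/mqnic0 exists and the header above is on the include path; error handling is kept short for brevity.

// Userspace sketch: query firmware/board IDs via MQNIC_IOCTL_INFO.
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "mqnic_ioctl.h"

int main(void)
{
	struct mqnic_ioctl_info info;
	int fd = open("/dev/mqnic0", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, MQNIC_IOCTL_INFO, &info) == 0)
		printf("FW 0x%08x ver %u.%u, board 0x%08x ver %u.%u\n",
				info.fw_id, info.fw_ver >> 16, info.fw_ver & 0xffff,
				info.board_id, info.board_ver >> 16, info.board_ver & 0xffff);

	close(fd);
	return 0;
}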


@@ -1,6 +1,6 @@
/*
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,7 @@ either expressed or implied, of The Regents of the University of California.
#include <linux/version.h>
#include <linux/delay.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
#include <linux/pci-aspm.h>
#endif
@@ -46,9 +46,9 @@ MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(DRIVER_VERSION);

static const struct pci_device_id mqnic_pci_id_table[] = {
	{PCI_DEVICE(0x1234, 0x1001)},
	{PCI_DEVICE(0x5543, 0x1001)},
	{0 /* end */}
};

MODULE_DEVICE_TABLE(pci, mqnic_pci_id_table);
@@ -58,416 +58,388 @@ static DEFINE_SPINLOCK(mqnic_devices_lock);
static unsigned int mqnic_get_free_id(void)
{
	struct mqnic_dev *mqnic;
	unsigned int id = 0;
	bool available = false;

	while (!available) {
		available = true;
		list_for_each_entry(mqnic, &mqnic_devices, dev_list_node) {
			if (mqnic->id == id) {
				available = false;
				id++;
				break;
			}
		}
	}

	return id;
}
static irqreturn_t mqnic_interrupt(int irq, void *data)
{
	struct mqnic_dev *mqnic = data;
	struct mqnic_priv *priv;
	int k, l;

	for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++) {
		if (unlikely(!mqnic->ndev[k]))
			continue;

		priv = netdev_priv(mqnic->ndev[k]);

		if (unlikely(!priv->port_up))
			continue;

		for (l = 0; l < priv->event_queue_count; l++) {
			if (unlikely(!priv->event_ring[l]))
				continue;

			if (priv->event_ring[l]->irq == irq) {
				mqnic_process_eq(priv->ndev, priv->event_ring[l]);
				mqnic_arm_eq(priv->event_ring[l]);
			}
		}
	}

	return IRQ_HANDLED;
}
static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0;
	struct mqnic_dev *mqnic;
	struct device *dev = &pdev->dev;

	int k = 0;

	dev_info(dev, DRIVER_NAME " PCI probe");
	dev_info(dev, " Vendor: 0x%04x", pdev->vendor);
	dev_info(dev, " Device: 0x%04x", pdev->device);
	dev_info(dev, " Class: 0x%06x", pdev->class);
	dev_info(dev, " PCI ID: %04x:%02x:%02x.%d", pci_domain_nr(pdev->bus),
			pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	if (pdev->pcie_cap) {
		u16 devctl;
		u32 lnkcap;
		u16 lnksta;

		pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
		pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &lnkcap);
		pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);

		dev_info(dev, " Max payload size: %d bytes",
				128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5));
		dev_info(dev, " Max read request size: %d bytes",
				128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
		dev_info(dev, " Link capability: gen %d x%d",
				lnkcap & PCI_EXP_LNKCAP_SLS, (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
		dev_info(dev, " Link status: gen %d x%d",
				lnksta & PCI_EXP_LNKSTA_CLS, (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
		dev_info(dev, " Relaxed ordering: %s",
				devctl & PCI_EXP_DEVCTL_RELAX_EN ? "enabled" : "disabled");
		dev_info(dev, " Phantom functions: %s",
				devctl & PCI_EXP_DEVCTL_PHANTOM ? "enabled" : "disabled");
		dev_info(dev, " Extended tags: %s",
				devctl & PCI_EXP_DEVCTL_EXT_TAG ? "enabled" : "disabled");
		dev_info(dev, " No snoop: %s",
				devctl & PCI_EXP_DEVCTL_NOSNOOP_EN ? "enabled" : "disabled");
	}

#ifdef CONFIG_NUMA
	dev_info(dev, " NUMA node: %d", pdev->dev.numa_node);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
	pcie_print_link_status(pdev);
#endif
	mqnic = devm_kzalloc(dev, sizeof(*mqnic), GFP_KERNEL);
	if (!mqnic) {
		dev_err(dev, "Failed to allocate memory");
		return -ENOMEM;
	}

	mqnic->dev = dev;
	mqnic->pdev = pdev;
	pci_set_drvdata(pdev, mqnic);

	// assign ID and add to list
	spin_lock(&mqnic_devices_lock);
	mqnic->id = mqnic_get_free_id();
	list_add_tail(&mqnic->dev_list_node, &mqnic_devices);
	spin_unlock(&mqnic_devices_lock);

	snprintf(mqnic->name, sizeof(mqnic->name), DRIVER_NAME "%d", mqnic->id);

	// Disable ASPM
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	// Enable device
	ret = pci_enable_device_mem(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable PCI device");
		goto fail_enable_device;
	}

	// Set mask
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(dev, "Warning: failed to set 64 bit PCI DMA mask");
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "Failed to set PCI DMA mask");
			goto fail_regions;
		}
	}

	// Set max segment size
	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));

	// Reserve regions
	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret) {
		dev_err(dev, "Failed to reserve regions");
		goto fail_regions;
	}
	mqnic->hw_regs_size = pci_resource_len(pdev, 0);
	mqnic->hw_regs_phys = pci_resource_start(pdev, 0);
	mqnic->app_hw_regs_size = pci_resource_len(pdev, 2);
	mqnic->app_hw_regs_phys = pci_resource_start(pdev, 2);
	mqnic->ram_hw_regs_size = pci_resource_len(pdev, 4);
	mqnic->ram_hw_regs_phys = pci_resource_start(pdev, 4);

	// Map BARs
	dev_info(dev, "Control BAR size: %llu", mqnic->hw_regs_size);
	mqnic->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!mqnic->hw_addr) {
		ret = -ENOMEM;
		dev_err(dev, "Failed to map control BAR");
		goto fail_map_bars;
	}

	if (mqnic->app_hw_regs_size) {
		dev_info(dev, "Application BAR size: %llu", mqnic->app_hw_regs_size);
		mqnic->app_hw_addr = pci_ioremap_bar(pdev, 2);
		if (!mqnic->app_hw_addr) {
			ret = -ENOMEM;
			dev_err(dev, "Failed to map application BAR");
			goto fail_map_bars;
		}
	}

	if (mqnic->ram_hw_regs_size) {
		dev_info(dev, "RAM BAR size: %llu", mqnic->ram_hw_regs_size);
		mqnic->ram_hw_addr = pci_ioremap_bar(pdev, 4);
		if (!mqnic->ram_hw_addr) {
			ret = -ENOMEM;
			dev_err(dev, "Failed to map RAM BAR");
			goto fail_map_bars;
		}
	}

	// Check if device needs to be reset
	if (ioread32(mqnic->hw_addr) == 0xffffffff) {
		ret = -EIO;
		dev_err(dev, "Device needs to be reset");
		goto fail_map_bars;
	}

	// Read ID registers
	mqnic->fw_id = ioread32(mqnic->hw_addr + MQNIC_REG_FW_ID);
	dev_info(dev, "FW ID: 0x%08x", mqnic->fw_id);
	mqnic->fw_ver = ioread32(mqnic->hw_addr + MQNIC_REG_FW_VER);
	dev_info(dev, "FW version: %d.%d", mqnic->fw_ver >> 16, mqnic->fw_ver & 0xffff);
	mqnic->board_id = ioread32(mqnic->hw_addr + MQNIC_REG_BOARD_ID);
	dev_info(dev, "Board ID: 0x%08x", mqnic->board_id);
	mqnic->board_ver = ioread32(mqnic->hw_addr + MQNIC_REG_BOARD_VER);
	dev_info(dev, "Board version: %d.%d", mqnic->board_ver >> 16, mqnic->board_ver & 0xffff);

	mqnic->phc_count = ioread32(mqnic->hw_addr + MQNIC_REG_PHC_COUNT);
	dev_info(dev, "PHC count: %d", mqnic->phc_count);
	mqnic->phc_offset = ioread32(mqnic->hw_addr + MQNIC_REG_PHC_OFFSET);
	dev_info(dev, "PHC offset: 0x%08x", mqnic->phc_offset);

	if (mqnic->phc_count)
		mqnic->phc_hw_addr = mqnic->hw_addr + mqnic->phc_offset;

	mqnic->if_count = ioread32(mqnic->hw_addr + MQNIC_REG_IF_COUNT);
	dev_info(dev, "IF count: %d", mqnic->if_count);
	mqnic->if_stride = ioread32(mqnic->hw_addr + MQNIC_REG_IF_STRIDE);
	dev_info(dev, "IF stride: 0x%08x", mqnic->if_stride);
	mqnic->if_csr_offset = ioread32(mqnic->hw_addr + MQNIC_REG_IF_CSR_OFFSET);
	dev_info(dev, "IF CSR offset: 0x%08x", mqnic->if_csr_offset);

	// check BAR size
	if (mqnic->if_count * mqnic->if_stride > mqnic->hw_regs_size) {
		ret = -EIO;
		dev_err(dev, "Invalid BAR configuration (%d IF * 0x%x > 0x%llx)",
				mqnic->if_count, mqnic->if_stride, mqnic->hw_regs_size);
		goto fail_map_bars;
	}
	// Allocate MSI IRQs
	mqnic->irq_count = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
	if (mqnic->irq_count < 0) {
		ret = -ENOMEM;
		dev_err(dev, "Failed to allocate IRQs");
		goto fail_map_bars;
	}

	// Set up interrupts
	for (k = 0; k < mqnic->irq_count; k++) {
		ret = pci_request_irq(pdev, k, mqnic_interrupt, NULL,
				mqnic, "%s-%d", mqnic->name, k);
		if (ret < 0) {
			dev_err(dev, "Failed to request IRQ");
			goto fail_irq;
		}

		mqnic->irq_map[k] = pci_irq_vector(pdev, k);
	}

	// Board-specific init
	ret = mqnic_board_init(mqnic);
	if (ret) {
		dev_err(dev, "Failed to initialize board");
		goto fail_board;
	}

	// Enable bus mastering for DMA
	pci_set_master(pdev);
	// register PHC
	if (mqnic->phc_count)
		mqnic_register_phc(mqnic);

	// Set up interfaces
	if (mqnic->if_count > MQNIC_MAX_IF)
		mqnic->if_count = MQNIC_MAX_IF;

	for (k = 0; k < mqnic->if_count; k++) {
		dev_info(dev, "Creating interface %d", k);
		ret = mqnic_init_netdev(mqnic, k, mqnic->hw_addr + k * mqnic->if_stride);
		if (ret) {
			dev_err(dev, "Failed to create net_device");
			goto fail_init_netdev;
		}
	}

	// pass module I2C clients to net_device instances
	for (k = 0; k < mqnic->if_count; k++) {
		struct mqnic_priv *priv = netdev_priv(mqnic->ndev[k]);

		priv->mod_i2c_client = mqnic->mod_i2c_client[k];
	}

	mqnic->misc_dev.minor = MISC_DYNAMIC_MINOR;
	mqnic->misc_dev.name = mqnic->name;
	mqnic->misc_dev.fops = &mqnic_fops;
	mqnic->misc_dev.parent = dev;

	ret = misc_register(&mqnic->misc_dev);
	if (ret) {
		dev_err(dev, "misc_register failed: %d\n", ret);
		goto fail_miscdev;
	}

	dev_info(dev, "Registered device %s", mqnic->name);

	pci_save_state(pdev);

	mutex_init(&mqnic->state_lock);

	// probe complete
	return 0;
	// error handling
fail_miscdev:
fail_init_netdev:
	for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
		if (mqnic->ndev[k])
			mqnic_destroy_netdev(mqnic->ndev[k]);
	mqnic_unregister_phc(mqnic);
	pci_clear_master(pdev);
fail_board:
	mqnic_board_deinit(mqnic);
	for (k = 0; k < mqnic->irq_count; k++)
		pci_free_irq(pdev, k, mqnic);
fail_irq:
	pci_free_irq_vectors(pdev);
fail_map_bars:
	if (mqnic->hw_addr)
		pci_iounmap(pdev, mqnic->hw_addr);
	if (mqnic->app_hw_addr)
		pci_iounmap(pdev, mqnic->app_hw_addr);
	if (mqnic->ram_hw_addr)
		pci_iounmap(pdev, mqnic->ram_hw_addr);
	pci_release_regions(pdev);
fail_regions:
	pci_disable_device(pdev);
fail_enable_device:
	spin_lock(&mqnic_devices_lock);
	list_del(&mqnic->dev_list_node);
	spin_unlock(&mqnic_devices_lock);
	return ret;
}
static void mqnic_pci_remove(struct pci_dev *pdev)
{
	struct mqnic_dev *mqnic = pci_get_drvdata(pdev);

	int k = 0;

	dev_info(&pdev->dev, DRIVER_NAME " PCI remove");

	misc_deregister(&mqnic->misc_dev);

	spin_lock(&mqnic_devices_lock);
	list_del(&mqnic->dev_list_node);
	spin_unlock(&mqnic_devices_lock);

	for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
		if (mqnic->ndev[k])
			mqnic_destroy_netdev(mqnic->ndev[k]);

	mqnic_unregister_phc(mqnic);

	pci_clear_master(pdev);
	mqnic_board_deinit(mqnic);
	for (k = 0; k < mqnic->irq_count; k++)
		pci_free_irq(pdev, k, mqnic);
	pci_free_irq_vectors(pdev);
	if (mqnic->hw_addr)
		pci_iounmap(pdev, mqnic->hw_addr);
	if (mqnic->app_hw_addr)
		pci_iounmap(pdev, mqnic->app_hw_addr);
	if (mqnic->ram_hw_addr)
		pci_iounmap(pdev, mqnic->ram_hw_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void mqnic_pci_shutdown(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, DRIVER_NAME " PCI shutdown");

	mqnic_pci_remove(pdev);
}

static struct pci_driver mqnic_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = mqnic_pci_id_table,
	.probe = mqnic_pci_probe,
	.remove = mqnic_pci_remove,
	.shutdown = mqnic_pci_shutdown
};

static int __init mqnic_init(void)
{
	return pci_register_driver(&mqnic_pci_driver);
}

static void __exit mqnic_exit(void)
{
	pci_unregister_driver(&mqnic_pci_driver);
}

module_init(mqnic_init);
module_exit(mqnic_exit);

File diff suppressed because it is too large

@@ -1,6 +1,6 @@
/*
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,109 +33,107 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"

int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
		int index, u8 __iomem *hw_addr)
{
	struct device *dev = priv->dev;
	struct mqnic_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		dev_err(dev, "Failed to allocate port");
		return -ENOMEM;
	}

	*port_ptr = port;

	port->dev = dev;
	port->ndev = priv->ndev;

	port->index = index;

	port->tx_queue_count = priv->tx_queue_count;

	port->hw_addr = hw_addr;

	// read ID registers
	port->port_id = ioread32(port->hw_addr + MQNIC_PORT_REG_PORT_ID);
	dev_info(dev, "Port ID: 0x%08x", port->port_id);
	port->port_features = ioread32(port->hw_addr + MQNIC_PORT_REG_PORT_FEATURES);
	dev_info(dev, "Port features: 0x%08x", port->port_features);
	port->port_mtu = ioread32(port->hw_addr + MQNIC_PORT_REG_PORT_MTU);
	dev_info(dev, "Port MTU: %d", port->port_mtu);

	port->sched_count = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_COUNT);
	dev_info(dev, "Scheduler count: %d", port->sched_count);
	port->sched_offset = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_OFFSET);
	dev_info(dev, "Scheduler offset: 0x%08x", port->sched_offset);
	port->sched_stride = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_STRIDE);
	dev_info(dev, "Scheduler stride: 0x%08x", port->sched_stride);
	port->sched_type = ioread32(port->hw_addr + MQNIC_PORT_REG_SCHED_TYPE);
	dev_info(dev, "Scheduler type: 0x%08x", port->sched_type);

	mqnic_deactivate_port(port);

	return 0;
}

void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr)
{
	struct mqnic_port *port = *port_ptr;

	*port_ptr = NULL;

	mqnic_deactivate_port(port);

	kfree(port);
}

int mqnic_activate_port(struct mqnic_port *port)
{
	int k;

	// enable schedulers
	iowrite32(0xffffffff, port->hw_addr + MQNIC_PORT_REG_SCHED_ENABLE);

	// enable queues
	for (k = 0; k < port->tx_queue_count; k++)
		iowrite32(3, port->hw_addr + port->sched_offset + k * 4);

	return 0;
}

void mqnic_deactivate_port(struct mqnic_port *port)
{
	// disable schedulers
	iowrite32(0, port->hw_addr + MQNIC_PORT_REG_SCHED_ENABLE);
}

u32 mqnic_port_get_rss_mask(struct mqnic_port *port)
{
	return ioread32(port->hw_addr + MQNIC_PORT_REG_RSS_MASK);
}

void mqnic_port_set_rss_mask(struct mqnic_port *port, u32 rss_mask)
{
	iowrite32(rss_mask, port->hw_addr + MQNIC_PORT_REG_RSS_MASK);
}

u32 mqnic_port_get_tx_mtu(struct mqnic_port *port)
{
	return ioread32(port->hw_addr + MQNIC_PORT_REG_TX_MTU);
}

void mqnic_port_set_tx_mtu(struct mqnic_port *port, u32 mtu)
{
	iowrite32(mtu, port->hw_addr + MQNIC_PORT_REG_TX_MTU);
}

u32 mqnic_port_get_rx_mtu(struct mqnic_port *port)
{
	return ioread32(port->hw_addr + MQNIC_PORT_REG_RX_MTU);
}

void mqnic_port_set_rx_mtu(struct mqnic_port *port, u32 mtu)
{
	iowrite32(mtu, port->hw_addr + MQNIC_PORT_REG_RX_MTU);
}
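The MTU accessors above are plain register writes, so a net_device callback can apply an MTU change to every port behind an interface. An illustrative sketch only; the handler name and the priv->port_count / priv->ports fields are assumptions about the surrounding driver structure, not code from this commit.

// Illustrative .ndo_change_mtu handler built on the accessors above.
static int example_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct mqnic_priv *priv = netdev_priv(ndev);
	int k;

	ndev->mtu = new_mtu;

	for (k = 0; k < priv->port_count; k++) {
		// program the MTU plus Ethernet header overhead into hardware
		mqnic_port_set_tx_mtu(priv->ports[k], new_mtu + ETH_HLEN);
		mqnic_port_set_rx_mtu(priv->ports[k], new_mtu + ETH_HLEN);
	}

	return 0;
}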


@@ -1,6 +1,6 @@
/*
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,271 +34,250 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"
#include <linux/version.h>

ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
		const struct mqnic_cpl *cpl)
{
	u64 ts_s = le16_to_cpu(cpl->ts_s);
	u32 ts_ns = le32_to_cpu(cpl->ts_ns);

	if (unlikely(!ring->ts_valid || (ring->ts_s ^ ts_s) & 0xff00)) {
		// seconds MSBs do not match, update cached timestamp
		ring->ts_s = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_CUR_SEC_L);
		ring->ts_s |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_CUR_SEC_H) << 32;
		ring->ts_valid = 1;
	}

	ts_s |= ring->ts_s & 0xffffffffffffff00;

	return ktime_set(ts_s, ts_ns);
}

static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);

	bool neg = false;
	u64 nom_per_fns, adj;

	dev_info(mdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm);

	if (scaled_ppm < 0) {
		neg = true;
		scaled_ppm = -scaled_ppm;
	}

	// assemble the 32.32 ns:fns nominal period from the two registers
	nom_per_fns = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_NOM_PERIOD_FNS);
	nom_per_fns |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_NOM_PERIOD_NS) << 32;

	if (nom_per_fns == 0)
		nom_per_fns = 0x4ULL << 32;

	adj = div_u64(((nom_per_fns >> 16) * scaled_ppm) + 500000, 1000000);

	if (neg)
		adj = nom_per_fns - adj;
	else
		adj = nom_per_fns + adj;

	iowrite32(adj & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_FNS);
	iowrite32(adj >> 32, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_NS);

	dev_info(mdev->dev, "mqnic_phc_adjfine adj: 0x%llx", adj);

	return 0;
}
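The arithmetic above works in two fixed-point domains: scaled_ppm carries parts-per-million with a 16-bit fractional part (65536 == 1 ppm, standard adjfine semantics), while the period registers hold a 32.32 ns:fns value. A standalone worked example, assuming a 4 ns nominal period (250 MHz clock); the numbers are illustrative only.

// Worked example of the adjfine fixed-point math (sketch, not driver code).
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t nom_per_fns = 0x4ULL << 32;	// nominal 4 ns period, 32.32 format
	long scaled_ppm = 65536;		// request +1 ppm

	// same rounding as the driver: delta = nom * ppm / 1e6, rounded
	uint64_t adj = (((nom_per_fns >> 16) * scaled_ppm) + 500000) / 1000000;

	// prints 17180: 17180 / 2^32 ns ~= 4 ns * 1e-6, i.e. one ppm of the period
	printf("delta = %llu fns units\n", (unsigned long long)adj);
	return 0;
}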
static int mqnic_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);

	ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_FNS);
	ts->tv_nsec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_NS);
	ts->tv_sec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_L);
	ts->tv_sec |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;

	return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
static int mqnic_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
		struct ptp_system_timestamp *sts)
{
	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);

	ptp_read_system_prets(sts);
	ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_FNS);
	ptp_read_system_postts(sts);
	ts->tv_nsec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_NS);
	ts->tv_sec = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_L);
	ts->tv_sec |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;

	return 0;
}
#endif

static int mqnic_phc_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);

	iowrite32(0, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_FNS);
	iowrite32(ts->tv_nsec, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_NS);
	iowrite32(ts->tv_sec & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_SEC_L);
	iowrite32(ts->tv_sec >> 32, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_SET_SEC_H);

	return 0;
}

static int mqnic_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
	struct timespec64 ts;

	dev_info(mdev->dev, "mqnic_phc_adjtime delta: %lld", delta);

	if (delta > 1000000000 || delta < -1000000000) {
		mqnic_phc_gettime(ptp, &ts);
		ts = timespec64_add(ts, ns_to_timespec64(delta));
		mqnic_phc_settime(ptp, &ts);
	} else {
		iowrite32(0, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_FNS);
		iowrite32(delta & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_NS);
		iowrite32(1, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_COUNT);
	}

	return 0;
}

static int mqnic_phc_perout(struct ptp_clock_info *ptp, int on, struct ptp_perout_request *perout)
{
	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);

	u8 __iomem *hw_addr;

	u64 start_sec, period_sec, width_sec;
	u32 start_nsec, period_nsec, width_nsec;

	if (perout->index >= mdev->ptp_clock_info.n_per_out)
		return -EINVAL;

	hw_addr = mdev->phc_hw_addr + MQNIC_PHC_PEROUT_OFFSET;

	if (!on) {
		iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_CTRL);

		return 0;
	}

	start_nsec = perout->start.nsec;
	start_sec = start_nsec / NSEC_PER_SEC;
	start_nsec -= start_sec * NSEC_PER_SEC;
	start_sec += perout->start.sec;

	period_nsec = perout->period.nsec;
	period_sec = period_nsec / NSEC_PER_SEC;
	period_nsec -= period_sec * NSEC_PER_SEC;
	period_sec += perout->period.sec;

	// set width to half of period
	width_sec = period_sec >> 1;
	width_nsec = (period_nsec + (period_sec & 1 ? NSEC_PER_SEC : 0)) >> 1;

	dev_info(mdev->dev, "mqnic_phc_perout start: %lld.%09d", start_sec, start_nsec);
	dev_info(mdev->dev, "mqnic_phc_perout period: %lld.%09d", period_sec, period_nsec);
	dev_info(mdev->dev, "mqnic_phc_perout width: %lld.%09d", width_sec, width_nsec);

	iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_START_FNS);
	iowrite32(start_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_START_NS);
	iowrite32(start_sec & 0xffffffff, hw_addr + MQNIC_PHC_REG_PEROUT_START_SEC_L);
	iowrite32(start_sec >> 32, hw_addr + MQNIC_PHC_REG_PEROUT_START_SEC_H);

	iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_FNS);
	iowrite32(period_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_NS);
	iowrite32(period_sec & 0xffffffff, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_SEC_L);
	iowrite32(period_sec >> 32, hw_addr + MQNIC_PHC_REG_PEROUT_PERIOD_SEC_H);

	iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_FNS);
	iowrite32(width_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_NS);
	iowrite32(width_sec & 0xffffffff, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_SEC_L);
	iowrite32(width_sec >> 32, hw_addr + MQNIC_PHC_REG_PEROUT_WIDTH_SEC_H);

	iowrite32(1, hw_addr + MQNIC_PHC_REG_PEROUT_CTRL);

	return 0;
}

static int mqnic_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
	if (!request)
		return -EINVAL;

	switch (request->type) {
	case PTP_CLK_REQ_EXTTS:
		return -EINVAL;
	case PTP_CLK_REQ_PEROUT:
		return mqnic_phc_perout(ptp, on, &request->perout);
	case PTP_CLK_REQ_PPS:
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

static void mqnic_phc_set_from_system_clock(struct ptp_clock_info *ptp)
{
	struct timespec64 ts;

#ifdef ktime_get_clocktai_ts64
	ktime_get_clocktai_ts64(&ts);
#else
	ts = ktime_to_timespec64(ktime_get_clocktai());
#endif

	mqnic_phc_settime(ptp, &ts);
}

void mqnic_register_phc(struct mqnic_dev *mdev)
{
	u32 phc_features;

	if (mdev->ptp_clock) {
		dev_warn(mdev->dev, "PTP clock already registered");
		return;
	}

	phc_features = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_FEATURES);

	mdev->ptp_clock_info.owner = THIS_MODULE;
	mdev->ptp_clock_info.max_adj = 100000000;
	mdev->ptp_clock_info.n_alarm = 0;
	mdev->ptp_clock_info.n_ext_ts = 0;
	mdev->ptp_clock_info.n_per_out = phc_features & 0xff;
	mdev->ptp_clock_info.n_pins = 0;
	mdev->ptp_clock_info.pps = 0;
	mdev->ptp_clock_info.adjfine = mqnic_phc_adjfine;
	mdev->ptp_clock_info.adjtime = mqnic_phc_adjtime;
	mdev->ptp_clock_info.gettime64 = mqnic_phc_gettime;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
	mdev->ptp_clock_info.gettimex64 = mqnic_phc_gettimex;
#endif
	mdev->ptp_clock_info.settime64 = mqnic_phc_settime;
	mdev->ptp_clock_info.enable = mqnic_phc_enable;

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, mdev->dev);

	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		dev_err(mdev->dev, "ptp_clock_register failed");
	} else {
		dev_info(mdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));

		mqnic_phc_set_from_system_clock(&mdev->ptp_clock_info);
	}
}

void mqnic_unregister_phc(struct mqnic_dev *mdev)
{
	if (mdev->ptp_clock) {
		ptp_clock_unregister(mdev->ptp_clock);
		mdev->ptp_clock = NULL;
		dev_info(mdev->dev, "unregistered PHC");
	}
}
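Once registered, the PHC shows up as a POSIX clock behind /dev/ptpN, using the index printed in the driver log above. A small userspace sketch; the /dev/ptp0 path is an assumption, and FD_TO_CLOCKID is defined locally since it normally lives in kernel selftest headers.

// Userspace sketch: read the registered PHC as a dynamic POSIX clock.
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0)
		return 1;

	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

	close(fd);
	return 0;
}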


@@ -1,6 +1,6 @@
/*
Copyright 2019-2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,392 +33,386 @@ either expressed or implied, of The Regents of the University of California.
#include "mqnic.h"

int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
		int size, int stride, int index, u8 __iomem *hw_addr)
{
	struct device *dev = priv->dev;
	struct mqnic_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		dev_err(dev, "Failed to allocate RX ring");
		return -ENOMEM;
	}

	ring->size = roundup_pow_of_two(size);
	ring->size_mask = ring->size - 1;
	ring->stride = roundup_pow_of_two(stride);

	ring->desc_block_size = ring->stride / MQNIC_DESC_SIZE;
	ring->log_desc_block_size = ring->desc_block_size < 2 ? 0 : ilog2(ring->desc_block_size - 1) + 1;
	ring->desc_block_size = 1 << ring->log_desc_block_size;

	ring->rx_info = kvzalloc(sizeof(*ring->rx_info) * ring->size, GFP_KERNEL);
	if (!ring->rx_info) {
		dev_err(dev, "Failed to allocate rx_info");
		ret = -ENOMEM;
		goto fail_ring;
	}

	ring->buf_size = ring->size * ring->stride;
	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
			&ring->buf_dma_addr, GFP_KERNEL);
	if (!ring->buf) {
		dev_err(dev, "Failed to allocate RX ring DMA buffer");
		ret = -ENOMEM;
		goto fail_info;
	}

	ring->hw_addr = hw_addr;
	ring->hw_ptr_mask = 0xffff;
	ring->hw_head_ptr = hw_addr + MQNIC_QUEUE_HEAD_PTR_REG;
	ring->hw_tail_ptr = hw_addr + MQNIC_QUEUE_TAIL_PTR_REG;

	ring->head_ptr = 0;
	ring->tail_ptr = 0;
	ring->clean_tail_ptr = 0;

	// deactivate queue
	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
	// set base address
	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
	// set completion queue index
	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
	// set pointers
	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
	// set size
	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

	*ring_ptr = ring;

	return 0;

fail_info:
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
fail_ring:
	kfree(ring);
	*ring_ptr = NULL;
	return ret;
}
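The descriptor block sizing above packs several descriptors into one ring slot and rounds the count up to a power of two. A standalone worked example, assuming MQNIC_DESC_SIZE is 16 bytes (the stride and sizes here are illustrative).

// Worked example of the block sizing math (sketch, not driver code).
#include <stdio.h>

int main(void)
{
	int stride = 128;			// bytes per descriptor slot
	int desc_block_size = stride / 16;	// 8 descriptors per block
	int log_size = 0;

	// equivalent of ilog2(n - 1) + 1: round up to a power of two
	while ((1 << log_size) < desc_block_size)
		log_size++;

	// prints: log_desc_block_size = 3, block = 8
	printf("log_desc_block_size = %d, block = %d\n", log_size, 1 << log_size);
	return 0;
}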
void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
{
	struct device *dev = priv->dev;
	struct mqnic_ring *ring = *ring_ptr;

	*ring_ptr = NULL;

	mqnic_deactivate_rx_ring(priv, ring);

	mqnic_free_rx_buf(priv, ring);

	dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
}

int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
		int cpl_index)
{
	// deactivate queue
	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
	// set base address
	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
	// set completion queue index
	iowrite32(cpl_index, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
	// set pointers
	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
	// set size and activate queue
	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

	mqnic_refill_rx_buffers(priv, ring);

	return 0;
}

void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
	// deactivate queue
	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
}

bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring)
{
	return ring->head_ptr == ring->clean_tail_ptr;
}

bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring)
{
	return ring->head_ptr - ring->clean_tail_ptr >= ring->size;
}

void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring)
{
	ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
}

void mqnic_rx_write_head_ptr(struct mqnic_ring *ring)
{
	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
}
void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
		int index)
{
	struct mqnic_rx_info *rx_info = &ring->rx_info[index];
	struct page *page = rx_info->page;

	dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
			dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
	rx_info->dma_addr = 0;
	__free_pages(page, rx_info->page_order);
	rx_info->page = NULL;
}

int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
	u32 index;
	int cnt = 0;

	while (!mqnic_is_rx_ring_empty(ring)) {
		index = ring->clean_tail_ptr & ring->size_mask;
		mqnic_free_rx_desc(priv, ring, index);
		ring->clean_tail_ptr++;
		cnt++;
	}

	ring->head_ptr = 0;
	ring->tail_ptr = 0;
	ring->clean_tail_ptr = 0;

	return cnt;
}
int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
		int index)
{
	struct mqnic_rx_info *rx_info = &ring->rx_info[index];
	struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);
	struct page *page = rx_info->page;
	u32 page_order = ring->page_order;
	u32 len = PAGE_SIZE << page_order;
	dma_addr_t dma_addr;

	if (unlikely(page)) {
		dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d",
				priv->port);
		return -1;
	}

	page = dev_alloc_pages(page_order);
	if (unlikely(!page)) {
		dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d",
				priv->port);
		return -1;
	}

	// map page
	dma_addr = dma_map_page(priv->dev, page, 0, len, PCI_DMA_FROMDEVICE);

	if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
		dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d",
				priv->port);
		__free_pages(page, page_order);
		return -1;
	}

	// write descriptor
	rx_desc->len = cpu_to_le32(len);
	rx_desc->addr = cpu_to_le64(dma_addr);

	// update rx_info
	rx_info->page = page;
	rx_info->page_order = page_order;
	rx_info->page_offset = 0;
	rx_info->dma_addr = dma_addr;
	rx_info->len = len;

	return 0;
}

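// Refill strategy: rather than replacing buffers one at a time,
// mqnic_refill_rx_buffers below only tops the ring back up once at least
// 8 slots are empty, presumably to amortize the doorbell write over a
// batch of descriptors.  The dma_wmb() ahead of the head pointer update
// orders the descriptor writes before the doorbell, so the NIC cannot
// fetch a descriptor it would see as stale.
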
void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
	u32 missing = ring->size - (ring->head_ptr - ring->clean_tail_ptr);

	if (missing < 8)
		return;

	for (; missing-- > 0;) {
		if (mqnic_prepare_rx_desc(priv, ring, ring->head_ptr & ring->size_mask))
			break;
		ring->head_ptr++;
	}

	// enqueue on NIC
	dma_wmb();
	mqnic_rx_write_head_ptr(ring);
}

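// Completion processing happens in two passes.  First, the completion
// queue is scanned from the software tail up to the hardware head, and
// each completion hands its page off to the stack.  Second, the receive
// ring itself is reclaimed: completions can land out of order, so
// clean_tail_ptr only advances past entries whose pages have already been
// consumed (rx_info->page == NULL) and stops at the first entry still
// outstanding.  On the mb() question asked below: the only requirement
// appears to be that the CQ head read is ordered before the reads of the
// completion records in DMA memory, so a dma_rmb() (or rmb(), given the
// head comes from an MMIO register) would presumably suffice; the full
// mb() looks conservative rather than wrong.
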
int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
		int napi_budget)
{
	struct mqnic_priv *priv = netdev_priv(ndev);
	struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
	struct mqnic_rx_info *rx_info;
	struct mqnic_cpl *cpl;
	struct sk_buff *skb;
	struct page *page;
	u32 cq_index;
	u32 cq_tail_ptr;
	u32 ring_index;
	u32 ring_clean_tail_ptr;
	int done = 0;
	int budget = napi_budget;
	u32 len;

	if (unlikely(!priv->port_up))
		return done;

	// process completion queue
	// read head pointer from NIC
	mqnic_cq_read_head_ptr(cq_ring);

	cq_tail_ptr = cq_ring->tail_ptr;
	cq_index = cq_tail_ptr & cq_ring->size_mask;

	mb(); // is a barrier here necessary?  If so, what kind?

	while (cq_ring->head_ptr != cq_tail_ptr && done < budget) {
		cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
		ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
		rx_info = &ring->rx_info[ring_index];
		page = rx_info->page;

		if (unlikely(!page)) {
			dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d",
					cq_ring->ring_index, ring_index);
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
					cpl, MQNIC_CPL_SIZE, true);
			break;
		}

		skb = napi_get_frags(&cq_ring->napi);
		if (unlikely(!skb)) {
			dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb",
					cq_ring->ring_index);
			break;
		}

		// RX hardware timestamp
		if (priv->if_features & MQNIC_IF_FEATURE_PTP_TS)
			skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);

		skb_record_rx_queue(skb, cq_ring->ring_index);

		// RX hardware checksum
		if (ndev->features & NETIF_F_RXCSUM) {
			skb->csum = csum_unfold((__sum16) cpu_to_be16(le16_to_cpu(cpl->rx_csum)));
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		len = min_t(u32, le16_to_cpu(cpl->len), rx_info->len);

		// sync for CPU while the page is still mapped, then unmap
		dma_sync_single_range_for_cpu(priv->dev, rx_info->dma_addr, rx_info->page_offset,
				rx_info->len, PCI_DMA_FROMDEVICE);

		// unmap
		dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
				dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
		rx_info->dma_addr = 0;

		__skb_fill_page_desc(skb, 0, page, rx_info->page_offset, len);
		rx_info->page = NULL;

		skb_shinfo(skb)->nr_frags = 1;
		skb->len = len;
		skb->data_len = len;
		skb->truesize += rx_info->len;

		// hand off SKB
		napi_gro_frags(&cq_ring->napi);

		ring->packets++;
		ring->bytes += le16_to_cpu(cpl->len);

		done++;

		cq_tail_ptr++;
		cq_index = cq_tail_ptr & cq_ring->size_mask;
	}

	// update CQ tail
	cq_ring->tail_ptr = cq_tail_ptr;
	mqnic_cq_write_tail_ptr(cq_ring);

	// process ring
	// read tail pointer from NIC
	mqnic_rx_read_tail_ptr(ring);

	ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
	ring_index = ring_clean_tail_ptr & ring->size_mask;

	while (ring_clean_tail_ptr != ring->tail_ptr) {
		rx_info = &ring->rx_info[ring_index];

		if (rx_info->page)
			break;

		ring_clean_tail_ptr++;
		ring_index = ring_clean_tail_ptr & ring->size_mask;
	}

	// update ring tail
	WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);

	// replenish buffers
	mqnic_refill_rx_buffers(priv, ring);

	return done;
}

void mqnic_rx_irq(struct mqnic_cq_ring *cq)
{
	struct mqnic_priv *priv = netdev_priv(cq->ndev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mqnic_arm_cq(cq);
}

int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
	struct net_device *ndev = cq_ring->ndev;
	int done;

	done = mqnic_process_rx_cq(ndev, cq_ring, budget);

	if (done == budget)
		return done;

	napi_complete(napi);

	mqnic_arm_cq(cq_ring);

	return done;
}


@@ -1,6 +1,6 @@
/*

Copyright 2019-2021, The Regents of the University of California.
All rights reserved.

Redistribution and use in source and binary forms, with or without
@@ -34,520 +34,487 @@ either expressed or implied, of The Regents of the University of California.
#include <linux/version.h>

#include "mqnic.h"

int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
		int size, int stride, int index, u8 __iomem *hw_addr)
{
	struct device *dev = priv->dev;
	struct mqnic_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		dev_err(dev, "Failed to allocate TX ring");
		return -ENOMEM;
	}

	ring->size = roundup_pow_of_two(size);
	ring->full_size = ring->size >> 1;
	ring->size_mask = ring->size - 1;
	ring->stride = roundup_pow_of_two(stride);

	ring->desc_block_size = ring->stride / MQNIC_DESC_SIZE;
	ring->log_desc_block_size = ring->desc_block_size < 2 ? 0 : ilog2(ring->desc_block_size - 1) + 1;
	ring->desc_block_size = 1 << ring->log_desc_block_size;

	ring->tx_info = kvzalloc(sizeof(*ring->tx_info) * ring->size, GFP_KERNEL);
	if (!ring->tx_info) {
		dev_err(dev, "Failed to allocate tx_info");
		ret = -ENOMEM;
		goto fail_ring;
	}

	ring->buf_size = ring->size * ring->stride;
	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
			&ring->buf_dma_addr, GFP_KERNEL);
	if (!ring->buf) {
		dev_err(dev, "Failed to allocate TX ring DMA buffer");
		ret = -ENOMEM;
		goto fail_info;
	}

	ring->hw_addr = hw_addr;
	ring->hw_ptr_mask = 0xffff;
	ring->hw_head_ptr = hw_addr + MQNIC_QUEUE_HEAD_PTR_REG;
	ring->hw_tail_ptr = hw_addr + MQNIC_QUEUE_TAIL_PTR_REG;

	ring->head_ptr = 0;
	ring->tail_ptr = 0;
	ring->clean_tail_ptr = 0;

	// deactivate queue
	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
	// set base address
	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
	// set completion queue index
	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
	// set pointers
	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
	// set size
	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

	*ring_ptr = ring;
	return 0;

fail_info:
	kvfree(ring->tx_info);
	ring->tx_info = NULL;
fail_ring:
	kfree(ring);
	*ring_ptr = NULL;
	return ret;
}

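// Teardown mirrors creation in reverse: the queue is deactivated first so
// the NIC stops fetching descriptors, any in-flight buffers are unmapped
// and freed, and only then is the coherent descriptor buffer released.
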
void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
{
	struct device *dev = priv->dev;
	struct mqnic_ring *ring = *ring_ptr;
	*ring_ptr = NULL;

	mqnic_deactivate_tx_ring(priv, ring);

	mqnic_free_tx_buf(priv, ring);

	dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
	kvfree(ring->tx_info);
	ring->tx_info = NULL;
	kfree(ring);
}

int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
		int cpl_index)
{
	// deactivate queue
	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
	// set base address
	iowrite32(ring->buf_dma_addr, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 0);
	iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr + MQNIC_QUEUE_BASE_ADDR_REG + 4);
	// set completion queue index
	iowrite32(cpl_index, ring->hw_addr + MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
	// set pointers
	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
	// set size and activate queue
	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

	return 0;
}

void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
	// deactivate queue
	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
}

bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring)
{
	return ring->head_ptr == ring->clean_tail_ptr;
}

bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring)
{
	return ring->head_ptr - ring->clean_tail_ptr >= ring->full_size;
}

void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring)
{
	ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
}

void mqnic_tx_write_head_ptr(struct mqnic_ring *ring)
{
	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
}

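// A transmitted skb is unmapped the same way it was mapped in
// mqnic_map_skb(): the linear head via dma_unmap_single(), using the
// address and length stashed with dma_unmap_addr_set()/dma_unmap_len_set(),
// and each page fragment via dma_unmap_page() from the frags[] array.
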
void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
		int index, int napi_budget)
{
	struct mqnic_tx_info *tx_info = &ring->tx_info[index];
	struct sk_buff *skb = tx_info->skb;
	u32 i;

	prefetchw(&skb->users);

	dma_unmap_single(priv->dev, dma_unmap_addr(tx_info, dma_addr),
			dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
	dma_unmap_addr_set(tx_info, dma_addr, 0);

	// unmap frags
	for (i = 0; i < tx_info->frag_count; i++)
		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
				tx_info->frags[i].len, PCI_DMA_TODEVICE);

	napi_consume_skb(skb, napi_budget);
	tx_info->skb = NULL;
}

int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
	u32 index;
	int cnt = 0;

	while (!mqnic_is_tx_ring_empty(ring)) {
		index = ring->clean_tail_ptr & ring->size_mask;
		mqnic_free_tx_desc(priv, ring, index, 0);
		ring->clean_tail_ptr++;
		cnt++;
	}

	ring->head_ptr = 0;
	ring->tail_ptr = 0;
	ring->clean_tail_ptr = 0;

	return cnt;
}

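// TX completion follows the same two-pass pattern as the RX path: scan
// the completion queue to free skbs, then advance clean_tail_ptr past
// descriptors whose skbs are gone (tx_info->skb == NULL), stopping at the
// first entry still in flight.  The netdev_tx_completed_queue() call for
// byte queue limits is stubbed out below; the packets/bytes counters are
// evidently kept so BQL can be re-enabled later.
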
int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
		int napi_budget)
{
	struct mqnic_priv *priv = netdev_priv(ndev);
	struct mqnic_ring *ring = priv->tx_ring[cq_ring->ring_index];
	struct mqnic_tx_info *tx_info;
	struct mqnic_cpl *cpl;
	u32 cq_index;
	u32 cq_tail_ptr;
	u32 ring_index;
	u32 ring_clean_tail_ptr;
	u32 packets = 0;
	u32 bytes = 0;
	int done = 0;
	int budget = napi_budget;

	if (unlikely(!priv->port_up))
		return done;

	// prefetch for BQL
	netdev_txq_bql_complete_prefetchw(ring->tx_queue);

	// process completion queue
	// read head pointer from NIC
	mqnic_cq_read_head_ptr(cq_ring);

	cq_tail_ptr = cq_ring->tail_ptr;
	cq_index = cq_tail_ptr & cq_ring->size_mask;

	while (cq_ring->head_ptr != cq_tail_ptr && done < budget) {
		cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
		ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
		tx_info = &ring->tx_info[ring_index];

		// TX hardware timestamp
		if (unlikely(tx_info->ts_requested)) {
			struct skb_shared_hwtstamps hwts;

			dev_info(priv->dev, "mqnic_process_tx_cq TX TS requested");
			hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
			skb_tstamp_tx(tx_info->skb, &hwts);
		}

		// free TX descriptor
		mqnic_free_tx_desc(priv, ring, ring_index, napi_budget);

		packets++;
		bytes += le16_to_cpu(cpl->len);

		done++;

		cq_tail_ptr++;
		cq_index = cq_tail_ptr & cq_ring->size_mask;
	}

	// update CQ tail
	cq_ring->tail_ptr = cq_tail_ptr;
	mqnic_cq_write_tail_ptr(cq_ring);

	// process ring
	// read tail pointer from NIC
	mqnic_tx_read_tail_ptr(ring);

	ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
	ring_index = ring_clean_tail_ptr & ring->size_mask;

	while (ring_clean_tail_ptr != ring->tail_ptr) {
		tx_info = &ring->tx_info[ring_index];

		if (tx_info->skb)
			break;

		ring_clean_tail_ptr++;
		ring_index = ring_clean_tail_ptr & ring->size_mask;
	}

	// update ring tail
	WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);

	// BQL
	//netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	// wake queue if it is stopped
	if (netif_tx_queue_stopped(ring->tx_queue) && !mqnic_is_tx_ring_full(ring))
		netif_tx_wake_queue(ring->tx_queue);

	return done;
}

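// The interrupt handlers defer the actual completion work to NAPI.
// napi_schedule_irqoff() assumes the caller runs with interrupts disabled
// (a hard-irq handler, presumably); if the port is down, the completion
// queue interrupt is simply re-armed instead.
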
void mqnic_tx_irq(struct mqnic_cq_ring *cq)
{
	struct mqnic_priv *priv = netdev_priv(cq->ndev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mqnic_arm_cq(cq);
}

int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
{
	struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
	struct net_device *ndev = cq_ring->ndev;
	int done;

	done = mqnic_process_tx_cq(ndev, cq_ring, budget);

	if (done == budget)
		return done;

	napi_complete(napi);

	mqnic_arm_cq(cq_ring);

	return done;
}

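// Descriptor block layout used by mqnic_map_skb() below: tx_desc[0]
// describes the linear portion of the skb, tx_desc[1..nr_frags] describe
// the page fragments, and any remaining descriptors in the block are
// zeroed so the hardware sees explicit zero-length entries.
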
static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
		struct mqnic_tx_info *tx_info,
		struct mqnic_desc *tx_desc, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 i;
	u32 len;
	dma_addr_t dma_addr;

	// update tx_info
	tx_info->skb = skb;
	tx_info->frag_count = 0;

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		len = skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
			// mapping failed
			goto map_error;

		// write descriptor
		tx_desc[i + 1].len = cpu_to_le32(len);
		tx_desc[i + 1].addr = cpu_to_le64(dma_addr);

		// update tx_info
		tx_info->frag_count = i + 1;
		tx_info->frags[i].len = len;
		tx_info->frags[i].dma_addr = dma_addr;
	}

	for (i = tx_info->frag_count; i < ring->desc_block_size - 1; i++) {
		tx_desc[i + 1].len = 0;
		tx_desc[i + 1].addr = 0;
	}

	// map skb
	len = skb_headlen(skb);
	dma_addr = dma_map_single(priv->dev, skb->data, len, PCI_DMA_TODEVICE);

	if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
		// mapping failed
		goto map_error;

	// write descriptor
	tx_desc[0].len = cpu_to_le32(len);
	tx_desc[0].addr = cpu_to_le64(dma_addr);

	// update tx_info
	dma_unmap_addr_set(tx_info, dma_addr, dma_addr);
	dma_unmap_len_set(tx_info, len, len);

	return true;

map_error:
	dev_err(priv->dev, "mqnic_map_skb DMA mapping failed");

	// unmap frags
	for (i = 0; i < tx_info->frag_count; i++)
		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
				tx_info->frags[i].len, PCI_DMA_TODEVICE);

	// update tx_info
	tx_info->skb = NULL;
	tx_info->frag_count = 0;

	return false;
}

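// The TX checksum command built in mqnic_start_xmit() appears to pack
// three fields into 16 bits: bit 15 enables checksum offload, bits 14:8
// hold the byte offset from csum_start at which to store the result, and
// bits 7:0 hold the offset at which summing starts.  For a TCP segment
// over IPv4 with a 14-byte Ethernet header, csum_start = 34 and
// csum_offset = 16, giving cpu_to_le16(0x8000 | (16 << 8) | 34) =
// cpu_to_le16(0x9022).  The range check (start > 255 || offset > 127)
// matches these field widths; anything larger falls back to software
// checksumming.
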
netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct mqnic_priv *priv = netdev_priv(ndev);
	struct mqnic_ring *ring;
	struct mqnic_tx_info *tx_info;
	struct mqnic_desc *tx_desc;
	int ring_index;
	u32 index;
	bool stop_queue;
	u32 clean_tail_ptr;

	if (unlikely(!priv->port_up))
		goto tx_drop;

	ring_index = skb_get_queue_mapping(skb);

	if (unlikely(ring_index >= priv->tx_queue_count))
		// queue mapping out of range
		goto tx_drop;

	ring = priv->tx_ring[ring_index];

	clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);

	// prefetch for BQL
	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);

	index = ring->head_ptr & ring->size_mask;

	tx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);

	tx_info = &ring->tx_info[index];

	// TX hardware timestamp
	tx_info->ts_requested = 0;
	if (unlikely(priv->if_features & MQNIC_IF_FEATURE_PTP_TS && shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
		dev_info(priv->dev, "mqnic_start_xmit TX TS requested");
		shinfo->tx_flags |= SKBTX_IN_PROGRESS;
		tx_info->ts_requested = 1;
	}

	// TX hardware checksum
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start = skb_checksum_start_offset(skb);
		unsigned int csum_offset = skb->csum_offset;

		if (csum_start > 255 || csum_offset > 127) {
			dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d",
					csum_start, csum_offset);

			// offset out of range, fall back on software checksum
			if (skb_checksum_help(skb)) {
				// software checksumming failed
				goto tx_drop_count;
			}
			tx_desc->tx_csum_cmd = 0;
		} else {
			tx_desc->tx_csum_cmd = cpu_to_le16(0x8000 | (csum_offset << 8) | (csum_start));
		}
	} else {
		tx_desc->tx_csum_cmd = 0;
	}

	if (shinfo->nr_frags > ring->desc_block_size - 1 || (skb->data_len && skb->data_len < 32)) {
		// too many frags or very short data portion; linearize
		if (skb_linearize(skb))
			goto tx_drop_count;
	}

	// map skb
	if (!mqnic_map_skb(priv, ring, tx_info, tx_desc, skb))
		// map failed
		goto tx_drop_count;

	// count packet
	ring->packets++;
	ring->bytes += skb->len;

	// enqueue
	ring->head_ptr++;

	skb_tx_timestamp(skb);

	stop_queue = mqnic_is_tx_ring_full(ring);
	if (unlikely(stop_queue)) {
		dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d",
				ring_index, priv->port);
		netif_tx_stop_queue(ring->tx_queue);
	}

	// BQL
	//netdev_tx_sent_queue(ring->tx_queue, tx_info->len);
	//__netdev_tx_sent_queue(ring->tx_queue, tx_info->len, skb->xmit_more);

	// enqueue on NIC
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
	if (unlikely(!netdev_xmit_more() || stop_queue)) {
#else
	if (unlikely(!skb->xmit_more || stop_queue)) {
#endif
		dma_wmb();
		mqnic_tx_write_head_ptr(ring);
	}

	// check if queue restarted
	if (unlikely(stop_queue)) {
		smp_rmb();

		clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);

		if (unlikely(!mqnic_is_tx_ring_full(ring)))
			netif_tx_wake_queue(ring->tx_queue);
	}

	return NETDEV_TX_OK;

tx_drop_count:
	ring->dropped_packets++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}