
Initial commit of mqnic kernel module

Alex Forencich 2019-07-17 18:13:51 -07:00
parent 1df012a8d4
commit 6c5b6c99a1
12 changed files with 3300 additions and 0 deletions

11
modules/mqnic/Makefile Normal file

@@ -0,0 +1,11 @@
# object files to build
obj-m += mqnic.o
mqnic-objs += mqnic_main.o mqnic_dev.o mqnic_netdev.o mqnic_ethtool.o mqnic_ptp.o mqnic_tx.o mqnic_rx.o mqnic_cq.o mqnic_eq.o
all:
	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules

clean:
	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
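The Makefile builds the module out of tree against the headers of the currently running kernel. A typical build, load, and unload sequence from the module source directory would look something like this (assuming kernel headers are installed):

	make
	sudo insmod mqnic.ko
	sudo rmmod mqnic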

325
modules/mqnic/mqnic.h Normal file

@@ -0,0 +1,325 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#ifndef MQNIC_H
#define MQNIC_H
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/miscdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#define DRIVER_NAME "mqnic"
#define DRIVER_VERSION "0.1"
#include "mqnic_hw.h"
struct mqnic_i2c_priv
{
struct mqnic_dev *mqnic;
u8 __iomem *scl_in_reg;
u8 __iomem *scl_out_reg;
u8 __iomem *sda_in_reg;
u8 __iomem *sda_out_reg;
uint32_t scl_in_mask;
uint32_t scl_out_mask;
uint32_t sda_in_mask;
uint32_t sda_out_mask;
};
struct mqnic_dev {
struct pci_dev *pdev;
size_t hw_regs_size;
phys_addr_t hw_regs_phys;
u8 __iomem *hw_addr;
u8 __iomem *phc_hw_addr;
u8 base_mac[ETH_ALEN];
char name[16];
unsigned int id;
struct list_head dev_list_node;
struct miscdevice misc_dev;
u32 fw_id;
u32 fw_ver;
u32 board_id;
u32 board_ver;
u32 phc_count;
u32 phc_offset;
u32 if_count;
u32 if_stride;
u32 if_csr_offset;
struct net_device *ndev[MQNIC_MAX_IF];
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct i2c_algo_bit_data if_i2c_algo[MQNIC_MAX_IF];
struct i2c_adapter if_i2c_adap[MQNIC_MAX_IF];
struct mqnic_i2c_priv if_i2c_priv[MQNIC_MAX_IF];
struct i2c_algo_bit_data eeprom_i2c_algo;
struct i2c_adapter eeprom_i2c_adap;
struct mqnic_i2c_priv eeprom_i2c_priv;
struct i2c_client *eeprom_i2c_client;
};
struct mqnic_tx_info {
struct sk_buff *skb;
dma_addr_t dma_addr;
int len;
int ts_requested;
};
struct mqnic_rx_info {
struct sk_buff *skb;
dma_addr_t dma_addr;
int len;
};
struct mqnic_ring {
// written on enqueue (i.e. start_xmit)
u32 head_ptr;
u64 bytes;
u64 packets;
u64 dropped_packets;
struct netdev_queue *tx_queue;
// written from completion
u32 tail_ptr ____cacheline_aligned_in_smp;
u32 clean_tail_ptr;
u64 ts_s;
u8 ts_valid;
// mostly constant
u32 size;
u32 full_size;
u32 size_mask;
u32 stride;
u32 cpl_index;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
union {
struct mqnic_tx_info *tx_info;
struct mqnic_rx_info *rx_info;
};
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
} ____cacheline_aligned_in_smp;
struct mqnic_cq_ring {
u32 head_ptr;
u32 tail_ptr;
u32 size;
u32 size_mask;
u32 stride;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
struct net_device *ndev;
struct napi_struct napi;
int ring_index;
int int_index;
void (*handler) (struct mqnic_cq_ring *);
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
};
struct mqnic_eq_ring {
u32 head_ptr;
u32 tail_ptr;
u32 size;
u32 size_mask;
u32 stride;
size_t buf_size;
u8 *buf;
dma_addr_t buf_dma_addr;
struct net_device *ndev;
int int_index;
void (*handler) (struct mqnic_eq_ring *);
u32 hw_ptr_mask;
u8 __iomem *hw_addr;
u8 __iomem *hw_head_ptr;
u8 __iomem *hw_tail_ptr;
};
struct mqnic_priv {
struct device *dev;
struct net_device *ndev;
struct mqnic_dev *mdev;
spinlock_t stats_lock;
bool registered;
int port;
bool port_up;
u32 if_id;
u32 event_queue_count;
u32 event_queue_offset;
u32 tx_queue_count;
u32 tx_queue_offset;
u32 tx_cpl_queue_count;
u32 tx_cpl_queue_offset;
u32 rx_queue_count;
u32 rx_queue_offset;
u32 rx_cpl_queue_count;
u32 rx_cpl_queue_offset;
u32 port_count;
u32 port_offset;
u32 port_stride;
u8 __iomem *hw_addr;
u8 __iomem *csr_hw_addr;
struct mqnic_eq_ring *event_ring[MQNIC_MAX_EVENT_RINGS];
struct mqnic_ring *tx_ring[MQNIC_MAX_TX_RINGS];
struct mqnic_cq_ring *tx_cpl_ring[MQNIC_MAX_TX_CPL_RINGS];
struct mqnic_ring *rx_ring[MQNIC_MAX_RX_RINGS];
struct mqnic_cq_ring *rx_cpl_ring[MQNIC_MAX_RX_CPL_RINGS];
struct hwtstamp_config hwts_config;
};
// mqnic_main.c
extern struct mqnic_dev *mqnic_find_by_minor(unsigned minor);
// mqnic_dev.c
extern const struct file_operations mqnic_fops;
// mqnic_netdev.c
void mqnic_update_stats(struct net_device *ndev);
int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr);
void mqnic_destroy_netdev(struct net_device *ndev);
// mqnic_ptp.c
void mqnic_register_phc(struct mqnic_dev *mdev);
void mqnic_unregister_phc(struct mqnic_dev *mdev);
ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring, const struct mqnic_cpl *cpl);
// mqnic_eq.c
int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr);
int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index);
void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring);
bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring);
bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring);
void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring);
void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring);
void mqnic_arm_eq(struct mqnic_eq_ring *ring);
void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring);
// mqnic_cq.c
int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr);
int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int int_index);
void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring);
bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring);
bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring);
void mqnic_cq_read_head_ptr(struct mqnic_cq_ring *ring);
void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring);
void mqnic_arm_cq(struct mqnic_cq_ring *ring);
// mqnic_tx.c
int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring);
bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring);
void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring);
void mqnic_tx_write_head_ptr(struct mqnic_ring *ring);
void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index, int napi_budget);
int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
bool mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
void mqnic_tx_irq(struct mqnic_cq_ring *cq);
int mqnic_poll_tx_cq(struct napi_struct *napi, int budget);
netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);
// mqnic_rx.c
int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring);
bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring);
void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring);
void mqnic_rx_write_head_ptr(struct mqnic_ring *ring);
void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring);
bool mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
void mqnic_rx_irq(struct mqnic_cq_ring *cq);
int mqnic_poll_rx_cq(struct napi_struct *napi, int budget);
// mqnic_ethtool.c
extern const struct ethtool_ops mqnic_ethtool_ops;
#endif /* MQNIC_H */

158
modules/mqnic/mqnic_cq.c Normal file

@@ -0,0 +1,158 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_cq_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate CQ ring");
return -ENOMEM;
}
ring->ndev = priv->ndev;
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate CQ ring DMA buffer");
ret = -ENOMEM;
goto fail_ring;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
}
void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_cq_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_cq_ring(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kfree(ring);
}
int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int int_index)
{
ring->int_index = int_index;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_CPL_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(int_index, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_CPL_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
return 0;
}
void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(ring->int_index, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
}
bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring)
{
return ring->head_ptr == ring->tail_ptr;
}
bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring)
{
return ring->head_ptr - ring->tail_ptr >= ring->size;
}
void mqnic_cq_read_head_ptr(struct mqnic_cq_ring *ring)
{
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
}
void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring)
{
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
}
void mqnic_arm_cq(struct mqnic_cq_ring *ring)
{
iowrite32(ring->int_index | MQNIC_CPL_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
}
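The driver keeps head_ptr and tail_ptr as free-running 32-bit counters while only the low 16 bits (hw_ptr_mask) are exchanged with hardware, so the masked-difference update in mqnic_cq_read_head_ptr advances the software counter correctly across the 16-bit wrap. A minimal worked example of that arithmetic, with standalone values chosen purely for illustration:

	u32 head_ptr = 0x0001fffe;                 /* free-running software counter */
	u32 hw_val   = 0x00000003;                 /* 16-bit value read back from hw_head_ptr */
	head_ptr += (hw_val - head_ptr) & 0xffff;  /* delta = 0x0005, head_ptr becomes 0x00020003 */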

111
modules/mqnic/mqnic_dev.c Normal file

@@ -0,0 +1,111 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
static int mqnic_open(struct inode *inode, struct file *filp)
{
struct mqnic_dev *mqnic;
mqnic = mqnic_find_by_minor(iminor(inode));
if (mqnic == NULL)
{
pr_err("Failed to locate mqnic for minor = %u.\n", iminor(inode));
return -ENODEV;
}
filp->private_data = mqnic;
return 0;
}
static int mqnic_release(struct inode *inode, struct file *filp)
{
//struct mqnic_dev *mqnic = filp->private_data;
return 0;
}
static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *vma)
{
struct device *dev = &mqnic->pdev->dev;
size_t map_size = vma->vm_end - vma->vm_start;
int ret;
if (map_size > mqnic->hw_regs_size)
{
dev_err(dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <=%zu)", vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
return -EINVAL;
}
ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT, map_size, pgprot_noncached(vma->vm_page_prot));
if (ret)
{
dev_err(dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
}
else
{
dev_dbg(dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p", &mqnic->hw_regs_phys, (void *)vma->vm_start);
}
return ret;
}
static int mqnic_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct mqnic_dev *mqnic = filp->private_data;
struct device *dev = &mqnic->pdev->dev;
int ret;
if (vma->vm_pgoff == 0)
{
ret = mqnic_map_registers(mqnic, vma);
}
else
{
goto fail_invalid_offset;
}
return ret;
fail_invalid_offset:
dev_err(dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu", vma->vm_pgoff);
return -EINVAL;
}
const struct file_operations mqnic_fops = {
.owner = THIS_MODULE,
.open = mqnic_open,
.release = mqnic_release,
.mmap = mqnic_mmap,
};
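Since mqnic_mmap only accepts page offset 0 and maps the register BAR, user space can reach the CSRs through the misc device node. A hypothetical user-space sketch (device node name and mapping length assumed, error handling omitted; this is not part of the driver):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/mqnic0", O_RDWR | O_SYNC);  /* misc device registered in mqnic_probe */
		volatile uint32_t *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		printf("FW ID: 0x%08x\n", regs[0x0000 / 4]);    /* MQNIC_REG_FW_ID */
		munmap((void *)regs, 4096);
		close(fd);
		return 0;
	}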

215
modules/mqnic/mqnic_eq.c Normal file

@@ -0,0 +1,215 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_eq_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate EQ ring");
return -ENOMEM;
}
ring->ndev = priv->ndev;
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate EQ ring DMA buffer");
ret = -ENOMEM;
goto fail_ring;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
}
void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_eq_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_eq_ring(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kfree(ring);
}
int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index)
{
ring->int_index = int_index;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_EVENT_QUEUE_BASE_ADDR_REG+4);
// set interrupt index
iowrite32(int_index, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
return 0;
}
void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
// disarm queue
iowrite32(ring->int_index, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
}
bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring)
{
return ring->head_ptr == ring->tail_ptr;
}
bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring)
{
return ring->head_ptr - ring->tail_ptr >= ring->size;
}
void mqnic_eq_read_head_ptr(struct mqnic_eq_ring *ring)
{
ring->head_ptr += (ioread32(ring->hw_head_ptr) - ring->head_ptr) & ring->hw_ptr_mask;
}
void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring)
{
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_tail_ptr);
}
void mqnic_arm_eq(struct mqnic_eq_ring *ring)
{
iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
}
void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_event *event;
u32 eq_index;
u32 eq_tail_ptr;
int done = 0;
if (unlikely(!priv->port_up))
{
return;
}
// read head pointer from NIC
mqnic_eq_read_head_ptr(eq_ring);
eq_tail_ptr = eq_ring->tail_ptr;
eq_index = eq_tail_ptr & eq_ring->size_mask;
while (eq_ring->head_ptr != eq_tail_ptr)
{
event = (struct mqnic_event *)(eq_ring->buf + eq_index * MQNIC_EVENT_SIZE);
if (event->type == MQNIC_EVENT_TYPE_TX_CPL)
{
// transmit completion event
struct mqnic_cq_ring *cq_ring = priv->tx_cpl_ring[event->source];
if (likely(cq_ring && cq_ring->handler))
{
cq_ring->handler(cq_ring);
}
}
else if (event->type == MQNIC_EVENT_TYPE_RX_CPL)
{
// receive completion event
struct mqnic_cq_ring *cq_ring = priv->rx_cpl_ring[event->source];
if (likely(cq_ring && cq_ring->handler))
{
cq_ring->handler(cq_ring);
}
}
else
{
dev_warn(&priv->mdev->pdev->dev, "mqnic_process_eq on port %d: unknown event type %d (source %d)", priv->port, event->type, event->source);
}
done++;
eq_tail_ptr++;
eq_index = eq_tail_ptr & eq_ring->size_mask;
}
// update eq tail
eq_ring->tail_ptr = eq_tail_ptr;
mqnic_eq_write_tail_ptr(eq_ring);
}

81
modules/mqnic/mqnic_ethtool.c Normal file

@@ -0,0 +1,81 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
static void mqnic_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRIVER_VERSION, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d", mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
strlcpy(drvinfo->bus_info, pci_name(mdev->pdev), sizeof(drvinfo->bus_info));
}
static int mqnic_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
int ret;
ret = ethtool_op_get_ts_info(ndev, info);
if (ret)
return ret;
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
if (mdev->ptp_clock)
info->phc_index = ptp_clock_index(mdev->ptp_clock);
return ret;
}
const struct ethtool_ops mqnic_ethtool_ops = {
.get_drvinfo = mqnic_get_drvinfo,
.get_ts_info = mqnic_get_ts_info
};
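The get_ts_info hook is what backs ethtool -T <interface>; once mqnic_register_phc has created the PTP clock, it reports the PHC index alongside hardware TX/RX timestamping support.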

183
modules/mqnic/mqnic_hw.h Normal file

@@ -0,0 +1,183 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#ifndef MQNIC_HW_H
#define MQNIC_HW_H
#define MQNIC_MAX_IF 8
#define MQNIC_MAX_EVENT_RINGS 256
#define MQNIC_MAX_TX_RINGS 256
#define MQNIC_MAX_TX_CPL_RINGS 256
#define MQNIC_MAX_RX_RINGS 256
#define MQNIC_MAX_RX_CPL_RINGS 256
#define MQNIC_BOARD_ID_EXANIC_X10 0x1ce40001
#define MQNIC_BOARD_ID_ADM_PCIE_9V3 0x41440001
// NIC CSRs
#define MQNIC_REG_FW_ID 0x0000
#define MQNIC_REG_FW_VER 0x0004
#define MQNIC_REG_BOARD_ID 0x0008
#define MQNIC_REG_BOARD_VER 0x000C
#define MQNIC_REG_PHC_COUNT 0x0010
#define MQNIC_REG_PHC_OFFSET 0x0014
#define MQNIC_REG_PHC_STRIDE 0x0018
#define MQNIC_REG_IF_COUNT 0x0020
#define MQNIC_REG_IF_STRIDE 0x0024
#define MQNIC_REG_IF_CSR_OFFSET 0x002C
#define MQNIC_REG_GPIO_OUT 0x0100
#define MQNIC_REG_GPIO_IN 0x0104
#define MQNIC_PHC_REG_FEATURES 0x0000
#define MQNIC_PHC_REG_PTP_CUR_FNS 0x0010
#define MQNIC_PHC_REG_PTP_CUR_NS 0x0014
#define MQNIC_PHC_REG_PTP_CUR_SEC_L 0x0018
#define MQNIC_PHC_REG_PTP_CUR_SEC_H 0x001C
#define MQNIC_PHC_REG_PTP_GET_FNS 0x0020
#define MQNIC_PHC_REG_PTP_GET_NS 0x0024
#define MQNIC_PHC_REG_PTP_GET_SEC_L 0x0028
#define MQNIC_PHC_REG_PTP_GET_SEC_H 0x002C
#define MQNIC_PHC_REG_PTP_SET_FNS 0x0030
#define MQNIC_PHC_REG_PTP_SET_NS 0x0034
#define MQNIC_PHC_REG_PTP_SET_SEC_L 0x0038
#define MQNIC_PHC_REG_PTP_SET_SEC_H 0x003C
#define MQNIC_PHC_REG_PTP_PERIOD_FNS 0x0040
#define MQNIC_PHC_REG_PTP_PERIOD_NS 0x0044
#define MQNIC_PHC_REG_PTP_NOM_PERIOD_FNS 0x0048
#define MQNIC_PHC_REG_PTP_NOM_PERIOD_NS 0x004C
#define MQNIC_PHC_REG_PTP_ADJ_FNS 0x0050
#define MQNIC_PHC_REG_PTP_ADJ_NS 0x0054
#define MQNIC_PHC_REG_PTP_ADJ_COUNT 0x0058
#define MQNIC_PHC_REG_PTP_ADJ_ACTIVE 0x005C
#define MQNIC_PHC_REG_PEROUT_CTRL 0x0000
#define MQNIC_PHC_REG_PEROUT_STATUS 0x0004
#define MQNIC_PHC_REG_PEROUT_START_FNS 0x0010
#define MQNIC_PHC_REG_PEROUT_START_NS 0x0014
#define MQNIC_PHC_REG_PEROUT_START_SEC_L 0x0018
#define MQNIC_PHC_REG_PEROUT_START_SEC_H 0x001C
#define MQNIC_PHC_REG_PEROUT_PERIOD_FNS 0x0020
#define MQNIC_PHC_REG_PEROUT_PERIOD_NS 0x0024
#define MQNIC_PHC_REG_PEROUT_PERIOD_SEC_L 0x0028
#define MQNIC_PHC_REG_PEROUT_PERIOD_SEC_H 0x002C
#define MQNIC_PHC_REG_PEROUT_WIDTH_FNS 0x0030
#define MQNIC_PHC_REG_PEROUT_WIDTH_NS 0x0034
#define MQNIC_PHC_REG_PEROUT_WIDTH_SEC_L 0x0038
#define MQNIC_PHC_REG_PEROUT_WIDTH_SEC_H 0x003C
// Interface CSRs
#define MQNIC_IF_REG_IF_ID 0x0000
#define MQNIC_IF_REG_IF_FEATURES 0x0004
#define MQNIC_IF_REG_EVENT_QUEUE_COUNT 0x0010
#define MQNIC_IF_REG_EVENT_QUEUE_OFFSET 0x0014
#define MQNIC_IF_REG_TX_QUEUE_COUNT 0x0020
#define MQNIC_IF_REG_TX_QUEUE_OFFSET 0x0024
#define MQNIC_IF_REG_TX_CPL_QUEUE_COUNT 0x0028
#define MQNIC_IF_REG_TX_CPL_QUEUE_OFFSET 0x002C
#define MQNIC_IF_REG_RX_QUEUE_COUNT 0x0030
#define MQNIC_IF_REG_RX_QUEUE_OFFSET 0x0034
#define MQNIC_IF_REG_RX_CPL_QUEUE_COUNT 0x0038
#define MQNIC_IF_REG_RX_CPL_QUEUE_OFFSET 0x003C
#define MQNIC_IF_REG_PORT_COUNT 0x0040
#define MQNIC_IF_REG_PORT_OFFSET 0x0044
#define MQNIC_IF_REG_PORT_STRIDE 0x0048
#define MQNIC_QUEUE_STRIDE 0x00000020
#define MQNIC_CPL_QUEUE_STRIDE 0x00000020
#define MQNIC_EVENT_QUEUE_STRIDE 0x00000020
#define MQNIC_QUEUE_BASE_ADDR_REG 0x00
#define MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG 0x08
#define MQNIC_QUEUE_CPL_QUEUE_INDEX_REG 0x0C
#define MQNIC_QUEUE_HEAD_PTR_REG 0x10
#define MQNIC_QUEUE_TAIL_PTR_REG 0x18
#define MQNIC_QUEUE_ACTIVE_MASK 0x80000000
#define MQNIC_CPL_QUEUE_BASE_ADDR_REG 0x00
#define MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG 0x08
#define MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG 0x0C
#define MQNIC_CPL_QUEUE_HEAD_PTR_REG 0x10
#define MQNIC_CPL_QUEUE_TAIL_PTR_REG 0x18
#define MQNIC_CPL_QUEUE_ACTIVE_MASK 0x80000000
#define MQNIC_CPL_QUEUE_ARM_MASK 0x80000000
#define MQNIC_CPL_QUEUE_CONT_MASK 0x40000000
#define MQNIC_EVENT_QUEUE_BASE_ADDR_REG 0x00
#define MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG 0x08
#define MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG 0x0C
#define MQNIC_EVENT_QUEUE_HEAD_PTR_REG 0x10
#define MQNIC_EVENT_QUEUE_TAIL_PTR_REG 0x18
#define MQNIC_EVENT_QUEUE_ACTIVE_MASK 0x80000000
#define MQNIC_EVENT_QUEUE_ARM_MASK 0x80000000
#define MQNIC_EVENT_QUEUE_CONT_MASK 0x40000000
#define MQNIC_EVENT_TYPE_TX_CPL 0x0000
#define MQNIC_EVENT_TYPE_RX_CPL 0x0001
#define MQNIC_DESC_SIZE 16
#define MQNIC_CPL_SIZE 32
#define MQNIC_EVENT_SIZE 32
struct mqnic_desc {
u16 rsvd0;
u16 tx_csum_cmd;
u32 len;
u64 addr;
};
struct mqnic_cpl {
u16 queue;
u16 index;
u16 len;
u16 rsvd0;
u32 ts_ns;
u16 ts_s;
u16 rx_csum;
};
struct mqnic_event {
u16 type;
u16 source;
};
#endif /* MQNIC_HW_H */
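Note that MQNIC_DESC_SIZE, MQNIC_CPL_SIZE and MQNIC_EVENT_SIZE give the stride of each record in the DMA rings; the structs above only declare the fields the driver currently reads and writes, and the remaining bytes of each record are left untouched as reserved space.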

483
modules/mqnic/mqnic_main.c Normal file

@@ -0,0 +1,483 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
MODULE_DESCRIPTION("mqnic driver");
MODULE_AUTHOR("Alex Forencich");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE(DRIVER_NAME);
static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(0x1234, 0x1001) },
{ PCI_DEVICE(0x5543, 0x1001) },
{ 0 /* end */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
void mqnic_i2c_set_scl(void *data, int state)
{
struct mqnic_i2c_priv *priv = data;
if (state)
{
iowrite32(ioread32(priv->scl_out_reg) | priv->scl_out_mask, priv->scl_out_reg);
}
else
{
iowrite32(ioread32(priv->scl_out_reg) & ~priv->scl_out_mask, priv->scl_out_reg);
}
ioread32(priv->scl_out_reg);
}
void mqnic_i2c_set_sda(void *data, int state)
{
struct mqnic_i2c_priv *priv = data;
if (state)
{
iowrite32(ioread32(priv->sda_out_reg) | priv->sda_out_mask, priv->sda_out_reg);
}
else
{
iowrite32(ioread32(priv->sda_out_reg) & ~priv->sda_out_mask, priv->sda_out_reg);
}
ioread32(priv->sda_out_reg);
}
int mqnic_i2c_get_scl(void *data)
{
struct mqnic_i2c_priv *priv = data;
return !!(ioread32(priv->scl_in_reg) & priv->scl_in_mask);
}
int mqnic_i2c_get_sda(void *data)
{
struct mqnic_i2c_priv *priv = data;
return !!(ioread32(priv->sda_in_reg) & priv->sda_in_mask);
}
static const struct i2c_algo_bit_data mqnic_i2c_algo = {
.setsda = mqnic_i2c_set_sda,
.setscl = mqnic_i2c_set_scl,
.getsda = mqnic_i2c_get_sda,
.getscl = mqnic_i2c_get_scl,
.udelay = 5,
.timeout = 20
};
static struct i2c_board_info mqnic_eeprom_info = {
I2C_BOARD_INFO("24c02", 0x50),
};
static int mqnic_init_i2c(struct mqnic_dev *mqnic)
{
int ret = 0;
// interface i2c interfaces
// TODO
// eeprom i2c interface
mqnic->eeprom_i2c_adap.owner = THIS_MODULE;
mqnic->eeprom_i2c_priv.mqnic = mqnic;
mqnic->eeprom_i2c_priv.scl_in_reg = mqnic->hw_addr+MQNIC_REG_GPIO_IN;
mqnic->eeprom_i2c_priv.scl_out_reg = mqnic->hw_addr+MQNIC_REG_GPIO_OUT;
mqnic->eeprom_i2c_priv.sda_in_reg = mqnic->hw_addr+MQNIC_REG_GPIO_IN;
mqnic->eeprom_i2c_priv.sda_out_reg = mqnic->hw_addr+MQNIC_REG_GPIO_OUT;
mqnic->eeprom_i2c_priv.scl_in_mask = 1 << 24;
mqnic->eeprom_i2c_priv.scl_out_mask = 1 << 24;
mqnic->eeprom_i2c_priv.sda_in_mask = 1 << 25;
mqnic->eeprom_i2c_priv.sda_out_mask = 1 << 25;
mqnic->eeprom_i2c_algo = mqnic_i2c_algo;
mqnic->eeprom_i2c_algo.data = &mqnic->eeprom_i2c_priv;
mqnic->eeprom_i2c_adap.algo_data = &mqnic->eeprom_i2c_algo;
mqnic->eeprom_i2c_adap.dev.parent = &mqnic->pdev->dev;
iowrite32(ioread32(mqnic->hw_addr+MQNIC_REG_GPIO_OUT) & ~(1 << 26), mqnic->hw_addr+MQNIC_REG_GPIO_OUT); // WP disable
strlcpy(mqnic->eeprom_i2c_adap.name, "mqnic EEPROM", sizeof(mqnic->eeprom_i2c_adap.name));
ret = i2c_bit_add_bus(&mqnic->eeprom_i2c_adap);
if (ret)
{
return ret;
}
mqnic->eeprom_i2c_client = i2c_new_device(&mqnic->eeprom_i2c_adap, &mqnic_eeprom_info);
if (mqnic->eeprom_i2c_client == NULL)
{
ret = -ENODEV;
}
return ret;
}
static void mqnic_remove_i2c(struct mqnic_dev *mqnic)
{
// eeprom i2c interface
if (mqnic->eeprom_i2c_client)
{
i2c_unregister_device(mqnic->eeprom_i2c_client);
mqnic->eeprom_i2c_client = NULL;
}
if (mqnic->eeprom_i2c_adap.owner)
{
i2c_del_adapter(&mqnic->eeprom_i2c_adap);
}
memset(&mqnic->eeprom_i2c_adap, 0, sizeof(mqnic->eeprom_i2c_adap));
}
static LIST_HEAD(mqnic_devices);
static DEFINE_SPINLOCK(mqnic_devices_lock);
static unsigned int mqnic_get_free_id(void)
{
struct mqnic_dev *mqnic;
unsigned int id = 0;
bool available = false;
while (!available)
{
available = true;
list_for_each_entry(mqnic, &mqnic_devices, dev_list_node)
{
if (mqnic->id == id)
{
available = false;
id++;
break;
}
}
}
return id;
}
struct mqnic_dev *mqnic_find_by_minor(unsigned minor)
{
struct mqnic_dev *mqnic;
spin_lock(&mqnic_devices_lock);
list_for_each_entry(mqnic, &mqnic_devices, dev_list_node)
if (mqnic->misc_dev.minor == minor)
goto done;
mqnic = NULL;
done:
spin_unlock(&mqnic_devices_lock);
return mqnic;
}
static int mqnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0;
struct mqnic_dev *mqnic;
struct device *dev = &pdev->dev;
int k = 0;
dev_info(dev, "mqnic probe");
if (!(mqnic = devm_kzalloc(dev, sizeof(*mqnic), GFP_KERNEL)))
{
return -ENOMEM;
}
mqnic->pdev = pdev;
pci_set_drvdata(pdev, mqnic);
mqnic->misc_dev.minor = MISC_DYNAMIC_MINOR;
// assign ID and add to list
spin_lock(&mqnic_devices_lock);
mqnic->id = mqnic_get_free_id();
list_add_tail(&mqnic->dev_list_node, &mqnic_devices);
spin_unlock(&mqnic_devices_lock);
snprintf(mqnic->name, sizeof(mqnic->name), DRIVER_NAME "%d", mqnic->id);
// Disable ASPM
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
// Enable device
ret = pci_enable_device_mem(pdev);
if (ret)
{
dev_err(dev, "Failed to enable PCI device");
goto fail_enable_device;
}
// Set mask
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
{
dev_warn(dev, "Warning: failed to set 64 bit PCI DMA mask");
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
{
dev_err(dev, "Failed to set PCI DMA mask");
goto fail_regions;
}
}
// Set max segment size
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
// Reserve regions
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret)
{
dev_err(dev, "Failed to reserve regions");
goto fail_regions;
}
mqnic->hw_regs_size = pci_resource_end(pdev, 0) - pci_resource_start(pdev, 0) + 1;
mqnic->hw_regs_phys = pci_resource_start(pdev, 0);
// Map BAR
mqnic->hw_addr = pci_ioremap_bar(pdev, 0);
if (!mqnic->hw_addr)
{
ret = -ENOMEM;
dev_err(dev, "Failed to map BARs");
goto fail_map_bars;
}
// Check if device needs to be reset
if (ioread32(mqnic->hw_addr) == 0xffffffff)
{
ret = -EIO;
dev_err(dev, "Deivce needs to be reset");
goto fail_map_bars;
}
// Read ID registers
mqnic->fw_id = ioread32(mqnic->hw_addr+MQNIC_REG_FW_ID);
dev_info(dev, "FW ID: 0x%08x", mqnic->fw_id);
mqnic->fw_ver = ioread32(mqnic->hw_addr+MQNIC_REG_FW_VER);
dev_info(dev, "FW version: %d.%d", mqnic->fw_ver >> 16, mqnic->fw_ver & 0xffff);
mqnic->board_id = ioread32(mqnic->hw_addr+MQNIC_REG_BOARD_ID);
dev_info(dev, "Board ID: 0x%08x", mqnic->board_id);
mqnic->board_ver = ioread32(mqnic->hw_addr+MQNIC_REG_BOARD_VER);
dev_info(dev, "Board version: %d.%d", mqnic->board_ver >> 16, mqnic->board_ver & 0xffff);
mqnic->phc_count = ioread32(mqnic->hw_addr+MQNIC_REG_PHC_COUNT);
dev_info(dev, "PHC count: %d", mqnic->phc_count);
mqnic->phc_offset = ioread32(mqnic->hw_addr+MQNIC_REG_PHC_OFFSET);
dev_info(dev, "PHC offset: 0x%08x", mqnic->phc_offset);
mqnic->phc_hw_addr = mqnic->hw_addr+mqnic->phc_offset;
mqnic->if_count = ioread32(mqnic->hw_addr+MQNIC_REG_IF_COUNT);
dev_info(dev, "IF count: %d", mqnic->if_count);
mqnic->if_stride = ioread32(mqnic->hw_addr+MQNIC_REG_IF_STRIDE);
dev_info(dev, "IF stride: 0x%08x", mqnic->if_stride);
mqnic->if_csr_offset = ioread32(mqnic->hw_addr+MQNIC_REG_IF_CSR_OFFSET);
dev_info(dev, "IF CSR offset: 0x%08x", mqnic->if_csr_offset);
// Allocate MSI IRQs
ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (ret < 0)
{
dev_err(dev, "Failed to allocate IRQs");
goto fail_map_bars;
}
// Set up I2C interfaces
ret = mqnic_init_i2c(mqnic);
if (ret)
{
dev_err(dev, "Failed to register I2C interfaces");
goto fail_i2c;
}
// Read MAC from EEPROM
if (mqnic->eeprom_i2c_client)
{
ret = i2c_smbus_read_i2c_block_data(mqnic->eeprom_i2c_client, 0x00, 6, mqnic->base_mac);
if (ret < 0)
{
dev_warn(dev, "Failed to read MAC from EEPROM");
}
}
else
{
dev_warn(dev, "Failed to read MAC from EEPROM; no EEPROM I2C client registered");
}
// Enable bus mastering for DMA
pci_set_master(pdev);
// register PHC
if (mqnic->phc_count)
{
mqnic_register_phc(mqnic);
}
// Set up interfaces
if (mqnic->if_count > MQNIC_MAX_IF)
mqnic->if_count = MQNIC_MAX_IF;
for (k = 0; k < mqnic->if_count; k++)
{
dev_info(dev, "Creating interface %d", k);
ret = mqnic_init_netdev(mqnic, k, mqnic->hw_addr + k*mqnic->if_stride);
if (ret)
{
dev_err(dev, "Failed to create net_device");
goto fail_init_netdev;
}
}
mqnic->misc_dev.name = mqnic->name;
mqnic->misc_dev.fops = &mqnic_fops;
ret = misc_register(&mqnic->misc_dev);
if (ret)
{
dev_err(dev, "misc_register failed: %d\n", ret);
goto fail_miscdev;
}
pci_save_state(pdev);
// probe complete
return 0;
// error handling
fail_miscdev:
fail_init_netdev:
for (k = 0; k < MQNIC_MAX_IF; k++)
{
if (mqnic->ndev[k])
{
mqnic_destroy_netdev(mqnic->ndev[k]);
}
}
mqnic_unregister_phc(mqnic);
pci_clear_master(pdev);
fail_i2c:
mqnic_remove_i2c(mqnic);
pci_free_irq_vectors(pdev);
fail_map_bars:
pci_iounmap(pdev, mqnic->hw_addr);
pci_release_regions(pdev);
fail_regions:
pci_disable_device(pdev);
fail_enable_device:
spin_lock(&mqnic_devices_lock);
list_del(&mqnic->dev_list_node);
spin_unlock(&mqnic_devices_lock);
return ret;
}
static void mqnic_remove(struct pci_dev *pdev)
{
struct mqnic_dev *mqnic;
struct device *dev = &pdev->dev;
int k = 0;
dev_info(dev, "mqnic remove");
if (!(mqnic = pci_get_drvdata(pdev))) {
return;
}
misc_deregister(&mqnic->misc_dev);
spin_lock(&mqnic_devices_lock);
list_del(&mqnic->dev_list_node);
spin_unlock(&mqnic_devices_lock);
for (k = 0; k < MQNIC_MAX_IF; k++)
{
if (mqnic->ndev[k])
{
mqnic_destroy_netdev(mqnic->ndev[k]);
}
}
mqnic_unregister_phc(mqnic);
pci_clear_master(pdev);
mqnic_remove_i2c(mqnic);
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, mqnic->hw_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static void mqnic_shutdown(struct pci_dev *pdev)
{
struct mqnic_dev *mqnic = pci_get_drvdata(pdev);
struct device *dev = &pdev->dev;
dev_info(dev, "mqnic shutdown");
if (!mqnic) {
return;
}
// ensure DMA is disabled on shutdown
pci_clear_master(pdev);
}
static struct pci_driver pci_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = mqnic_probe,
.remove = mqnic_remove,
.shutdown = mqnic_shutdown
};
static int __init mqnic_init(void)
{
return pci_register_driver(&pci_driver);
}
static void __exit mqnic_exit(void)
{
pci_unregister_driver(&pci_driver);
}
module_init(mqnic_init);
module_exit(mqnic_exit);

597
modules/mqnic/mqnic_netdev.c Normal file

@@ -0,0 +1,597 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
static int mqnic_open(struct net_device *ndev)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
int k;
dev_info(&mdev->pdev->dev, "mqnic_open on port %d", priv->port);
// set up event queues
for (k = 0; k < priv->event_queue_count; k++)
{
mqnic_activate_eq_ring(priv, priv->event_ring[k], priv->port); // TODO interrupt index
mqnic_arm_eq(priv->event_ring[k]);
}
// set up RX completion queues
for (k = 0; k < priv->rx_cpl_queue_count; k++)
{
mqnic_activate_cq_ring(priv, priv->rx_cpl_ring[k], 0); // TODO configure/constant
priv->rx_cpl_ring[k]->ring_index = k;
priv->rx_cpl_ring[k]->handler = mqnic_rx_irq;
netif_napi_add(ndev, &priv->rx_cpl_ring[k]->napi, mqnic_poll_rx_cq, NAPI_POLL_WEIGHT);
napi_enable(&priv->rx_cpl_ring[k]->napi);
mqnic_arm_cq(priv->rx_cpl_ring[k]);
}
// set up RX queues
for (k = 0; k < priv->rx_queue_count; k++)
{
mqnic_activate_rx_ring(priv, priv->rx_ring[k], k);
}
// set up TX completion queues
for (k = 0; k < priv->tx_cpl_queue_count; k++)
{
mqnic_activate_cq_ring(priv, priv->tx_cpl_ring[k], 0); // TODO configure/constant
priv->tx_cpl_ring[k]->ring_index = k;
priv->tx_cpl_ring[k]->handler = mqnic_tx_irq;
netif_tx_napi_add(ndev, &priv->tx_cpl_ring[k]->napi, mqnic_poll_tx_cq, NAPI_POLL_WEIGHT);
napi_enable(&priv->tx_cpl_ring[k]->napi);
mqnic_arm_cq(priv->tx_cpl_ring[k]);
}
// set up TX queues
for (k = 0; k < priv->tx_queue_count; k++)
{
mqnic_activate_tx_ring(priv, priv->tx_ring[k], k);
priv->tx_ring[k]->tx_queue = netdev_get_tx_queue(ndev, k);
}
priv->port_up = true;
netif_tx_start_all_queues(ndev);
netif_device_attach(ndev);
//netif_carrier_off(ndev);
netif_carrier_on(ndev); // TODO link status monitoring
return 0;
}
static int mqnic_close(struct net_device *ndev)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
int k;
dev_info(&mdev->pdev->dev, "mqnic_close on port %d", priv->port);
netif_tx_lock_bh(ndev);
// if (detach)
// netif_device_detach(ndev);
netif_tx_stop_all_queues(ndev);
netif_tx_unlock_bh(ndev);
netif_tx_disable(ndev);
spin_lock_bh(&priv->stats_lock);
mqnic_update_stats(ndev);
priv->port_up = false;
spin_unlock_bh(&priv->stats_lock);
// deactivate TX queues
for (k = 0; k < priv->tx_queue_count; k++)
{
mqnic_deactivate_tx_ring(priv, priv->tx_ring[k]);
}
// deactivate TX completion queues
for (k = 0; k < priv->tx_cpl_queue_count; k++)
{
mqnic_deactivate_cq_ring(priv, priv->tx_cpl_ring[k]);
napi_disable(&priv->tx_cpl_ring[k]->napi);
netif_napi_del(&priv->tx_cpl_ring[k]->napi);
}
// deactivate RX queues
for (k = 0; k < priv->rx_queue_count; k++)
{
mqnic_deactivate_rx_ring(priv, priv->rx_ring[k]);
}
// deactivate RX completion queues
for (k = 0; k < priv->rx_cpl_queue_count; k++)
{
mqnic_deactivate_cq_ring(priv, priv->rx_cpl_ring[k]);
napi_disable(&priv->rx_cpl_ring[k]->napi);
netif_napi_del(&priv->rx_cpl_ring[k]->napi);
}
// deactivate event queues
for (k = 0; k < priv->event_queue_count; k++)
{
mqnic_deactivate_eq_ring(priv, priv->event_ring[k]);
}
msleep(10);
// free descriptors in TX queues
for (k = 0; k < priv->tx_queue_count; k++)
{
mqnic_free_tx_buf(priv, priv->tx_ring[k]);
}
// free descriptors in RX queues
for (k = 0; k < priv->rx_queue_count; k++)
{
mqnic_free_rx_buf(priv, priv->rx_ring[k]);
}
netif_carrier_off(ndev);
return 0;
}
void mqnic_update_stats(struct net_device *ndev)
{
struct mqnic_priv *priv = netdev_priv(ndev);
unsigned long packets, bytes;
int k;
if (unlikely(!priv->port_up))
return;
packets = 0;
bytes = 0;
for (k = 0; k < priv->rx_queue_count; k++)
{
const struct mqnic_ring *ring = priv->rx_ring[k];
packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
}
ndev->stats.rx_packets = packets;
ndev->stats.rx_bytes = bytes;
packets = 0;
bytes = 0;
for (k = 0; k < priv->tx_queue_count; k++)
{
const struct mqnic_ring *ring = priv->tx_ring[k];
packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
}
ndev->stats.tx_packets = packets;
ndev->stats.tx_bytes = bytes;
}
static void mqnic_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
struct mqnic_priv *priv = netdev_priv(ndev);
spin_lock_bh(&priv->stats_lock);
mqnic_update_stats(ndev);
netdev_stats_to_stats64(stats, &ndev->stats);
spin_unlock_bh(&priv->stats_lock);
}
static int mqnic_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct hwtstamp_config hwts_config;
if (copy_from_user(&hwts_config, ifr->ifr_data, sizeof(hwts_config)))
{
return -EFAULT;
}
if (hwts_config.flags)
{
return -EINVAL;
}
switch (hwts_config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
switch (hwts_config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
hwts_config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
memcpy(&priv->hwts_config, &hwts_config, sizeof(hwts_config));
if (copy_to_user(ifr->ifr_data, &hwts_config, sizeof(hwts_config)))
{
return -EFAULT;
}
else
{
return 0;
}
}
static int mqnic_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
{
struct mqnic_priv *priv = netdev_priv(ndev);
if (copy_to_user(ifr->ifr_data, &priv->hwts_config, sizeof(priv->hwts_config)))
{
return -EFAULT;
}
else
{
return 0;
}
}
static int mqnic_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
return mqnic_hwtstamp_set(ndev, ifr);
case SIOCGHWTSTAMP:
return mqnic_hwtstamp_get(ndev, ifr);
default:
return -EOPNOTSUPP;
}
}
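/* SIOCSHWTSTAMP / SIOCGHWTSTAMP are the standard hardware timestamping ioctls,
 * so existing tools such as hwstamp_ctl or ptp4l can configure timestamping on
 * this interface without any driver-specific utility. */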
static const struct net_device_ops mqnic_netdev_ops = {
.ndo_open = mqnic_open,
.ndo_stop = mqnic_close,
.ndo_start_xmit = mqnic_start_xmit,
.ndo_get_stats64 = mqnic_get_stats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mqnic_ioctl,
};
static irqreturn_t mqnic_netdev_interrupt(int irq, void *data)
{
struct mqnic_priv *priv = data;
int k;
if (likely(priv->port_up))
{
for (k = 0; k < priv->event_queue_count; k++)
{
if (likely(priv->event_ring[k]))
{
mqnic_process_eq(priv->ndev, priv->event_ring[k]);
mqnic_arm_eq(priv->event_ring[k]);
}
}
}
return IRQ_HANDLED;
}
int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
{
struct device *dev = &mdev->pdev->dev;
struct net_device *ndev;
struct mqnic_priv *priv;
int ret = 0;
int k;
ndev = alloc_etherdev_mqs(sizeof(*priv), MQNIC_MAX_TX_RINGS, MQNIC_MAX_RX_RINGS);
if (!ndev)
{
return -ENOMEM;
}
SET_NETDEV_DEV(ndev, &mdev->pdev->dev);
ndev->dev_port = port;
// init private data
priv = netdev_priv(ndev);
memset(priv, 0, sizeof(struct mqnic_priv));
spin_lock_init(&priv->stats_lock);
priv->ndev = ndev;
priv->mdev = mdev;
priv->dev = dev;
priv->port = port;
priv->port_up = false;
priv->hw_addr = hw_addr;
priv->csr_hw_addr = hw_addr+mdev->if_csr_offset;
// read ID registers
priv->if_id = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_IF_ID);
dev_info(dev, "IF ID: 0x%08x", priv->if_id);
priv->event_queue_count = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_EVENT_QUEUE_COUNT);
dev_info(dev, "Event queue count: %d", priv->event_queue_count);
priv->event_queue_offset = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_EVENT_QUEUE_OFFSET);
dev_info(dev, "Event queue offset: 0x%08x", priv->event_queue_offset);
priv->tx_queue_count = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_TX_QUEUE_COUNT);
dev_info(dev, "TX queue count: %d", priv->tx_queue_count);
priv->tx_queue_offset = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_TX_QUEUE_OFFSET);
dev_info(dev, "TX queue offset: 0x%08x", priv->tx_queue_offset);
priv->tx_cpl_queue_count = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_TX_CPL_QUEUE_COUNT);
dev_info(dev, "TX completion queue count: %d", priv->tx_cpl_queue_count);
priv->tx_cpl_queue_offset = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_TX_CPL_QUEUE_OFFSET);
dev_info(dev, "TX completion queue offset: 0x%08x", priv->tx_cpl_queue_offset);
priv->rx_queue_count = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_RX_QUEUE_COUNT);
dev_info(dev, "RX queue count: %d", priv->rx_queue_count);
priv->rx_queue_offset = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_RX_QUEUE_OFFSET);
dev_info(dev, "RX queue offset: 0x%08x", priv->rx_queue_offset);
priv->rx_cpl_queue_count = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_RX_CPL_QUEUE_COUNT);
dev_info(dev, "RX completion queue count: %d", priv->rx_cpl_queue_count);
priv->rx_cpl_queue_offset = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_RX_CPL_QUEUE_OFFSET);
dev_info(dev, "RX completion queue offset: 0x%08x", priv->rx_cpl_queue_offset);
priv->port_count = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_PORT_COUNT);
dev_info(dev, "Port count: %d", priv->port_count);
priv->port_offset = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_PORT_OFFSET);
dev_info(dev, "Port offset: 0x%08x", priv->port_offset);
priv->port_stride = ioread32(priv->csr_hw_addr+MQNIC_IF_REG_PORT_STRIDE);
dev_info(dev, "Port stride: 0x%08x", priv->port_stride);
if (priv->event_queue_count > MQNIC_MAX_EVENT_RINGS)
priv->event_queue_count = MQNIC_MAX_EVENT_RINGS;
if (priv->tx_queue_count > MQNIC_MAX_TX_RINGS)
priv->tx_queue_count = MQNIC_MAX_TX_RINGS;
if (priv->tx_cpl_queue_count > MQNIC_MAX_TX_CPL_RINGS)
priv->tx_cpl_queue_count = MQNIC_MAX_TX_CPL_RINGS;
if (priv->rx_queue_count > MQNIC_MAX_RX_RINGS)
priv->rx_queue_count = MQNIC_MAX_RX_RINGS;
if (priv->rx_cpl_queue_count > MQNIC_MAX_RX_CPL_RINGS)
priv->rx_cpl_queue_count = MQNIC_MAX_RX_CPL_RINGS;
// TODO use all queues
priv->event_queue_count = 1;
priv->tx_queue_count = 1;
priv->tx_cpl_queue_count = 1;
priv->rx_queue_count = 1;
priv->rx_cpl_queue_count = 1;
netif_set_real_num_tx_queues(ndev, priv->tx_queue_count);
netif_set_real_num_rx_queues(ndev, priv->rx_queue_count);
// Set up interrupt
ret = pci_request_irq(mdev->pdev, priv->port, mqnic_netdev_interrupt, 0, priv, "mqnic%d", priv->port);
if (ret < 0)
{
dev_err(dev, "Failed to request IRQ");
free_netdev(ndev);
return ret;
}
// set MAC
ndev->addr_len = ETH_ALEN;
memcpy(ndev->dev_addr, mdev->base_mac, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
{
dev_warn(dev, "Bad MAC in EEPROM; using random MAC");
eth_hw_addr_random(ndev);
}
else
{
ndev->dev_addr[ETH_ALEN-1] += port;
}
priv->hwts_config.flags = 0;
priv->hwts_config.tx_type = HWTSTAMP_TX_OFF;
priv->hwts_config.rx_filter = HWTSTAMP_FILTER_NONE;
// allocate rings
for (k = 0; k < priv->event_queue_count; k++)
{
ret = mqnic_create_eq_ring(priv, &priv->event_ring[k], 1024, MQNIC_EVENT_SIZE, k, hw_addr+priv->event_queue_offset+k*MQNIC_EVENT_QUEUE_STRIDE); // TODO configure/constant
if (ret)
{
goto fail;
}
}
for (k = 0; k < priv->tx_queue_count; k++)
{
ret = mqnic_create_tx_ring(priv, &priv->tx_ring[k], 1024, MQNIC_DESC_SIZE, k, hw_addr+priv->tx_queue_offset+k*MQNIC_QUEUE_STRIDE); // TODO configure/constant
if (ret)
{
goto fail;
}
}
for (k = 0; k < priv->tx_cpl_queue_count; k++)
{
ret = mqnic_create_cq_ring(priv, &priv->tx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k, hw_addr+priv->tx_cpl_queue_offset+k*MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
if (ret)
{
goto fail;
}
}
for (k = 0; k < priv->rx_queue_count; k++)
{
ret = mqnic_create_rx_ring(priv, &priv->rx_ring[k], 1024, MQNIC_DESC_SIZE, k, hw_addr+priv->rx_queue_offset+k*MQNIC_QUEUE_STRIDE); // TODO configure/constant
if (ret)
{
goto fail;
}
}
for (k = 0; k < priv->rx_cpl_queue_count; k++)
{
ret = mqnic_create_cq_ring(priv, &priv->rx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k, hw_addr+priv->rx_cpl_queue_offset+k*MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
if (ret)
{
goto fail;
}
}
// scheduler queue enable
iowrite32(0xffffffff, hw_addr+priv->port_offset+0x0200);
// scheduler global enable
iowrite32(0xffffffff, hw_addr+priv->port_offset+0x0300);
// entry points
ndev->netdev_ops = &mqnic_netdev_ops;
ndev->ethtool_ops = &mqnic_ethtool_ops;
// set up features
ndev->hw_features = 0;
if (1) // TODO check flag
{
ndev->hw_features |= NETIF_F_RXCSUM;
}
if (0) // TODO check flag
{
ndev->hw_features |= NETIF_F_HW_CSUM;
}
ndev->features = ndev->hw_features | NETIF_F_HIGHDMA;
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = 2048; // TODO
netif_carrier_off(ndev);
ret = register_netdev(ndev);
if (ret)
{
dev_err(dev, "netdev registration failed on port %d", port);
goto fail;
}
priv->registered = 1;
mdev->ndev[port] = ndev;
return 0;
fail:
mqnic_destroy_netdev(ndev);
return ret;
}
void mqnic_destroy_netdev(struct net_device *ndev)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_dev *mdev = priv->mdev;
int k;
if (priv->registered)
{
unregister_netdev(ndev);
}
mdev->ndev[priv->port] = NULL;
// free rings
for (k = 0; k < MQNIC_MAX_EVENT_RINGS; k++)
{
if (priv->event_ring[k])
{
mqnic_destroy_eq_ring(priv, &priv->event_ring[k]);
}
}
for (k = 0; k < MQNIC_MAX_TX_RINGS; k++)
{
if (priv->tx_ring[k])
{
mqnic_destroy_tx_ring(priv, &priv->tx_ring[k]);
}
}
for (k = 0; k < MQNIC_MAX_TX_CPL_RINGS; k++)
{
if (priv->tx_cpl_ring[k])
{
mqnic_destroy_cq_ring(priv, &priv->tx_cpl_ring[k]);
}
}
for (k = 0; k < MQNIC_MAX_RX_RINGS; k++)
{
if (priv->rx_ring[k])
{
mqnic_destroy_rx_ring(priv, &priv->rx_ring[k]);
}
}
for (k = 0; k < MQNIC_MAX_RX_CPL_RINGS; k++)
{
if (priv->rx_cpl_ring[k])
{
mqnic_destroy_cq_ring(priv, &priv->rx_cpl_ring[k]);
}
}
pci_free_irq(mdev->pdev, priv->port, priv);
free_netdev(ndev);
}

304
modules/mqnic/mqnic_ptp.c Normal file

@ -0,0 +1,304 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
#include <linux/version.h>
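// completion records carry only the low-order bits of the PTP seconds field; the full 64-bit
// seconds value is rebuilt by splicing in upper bits cached from the PHC, and the cache is
// refreshed whenever the overlapping bits no longer match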
ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring, const struct mqnic_cpl *cpl)
{
u64 ts_s = cpl->ts_s;
u32 ts_ns = cpl->ts_ns;
if (unlikely(!ring->ts_valid || (ring->ts_s ^ ts_s) & 0xff00))
{
// seconds MSBs do not match, update cached timestamp
ring->ts_s = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_CUR_SEC_L);
ring->ts_s |= (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_CUR_SEC_H) << 32;
ring->ts_valid = 1;
}
ts_s |= ring->ts_s & 0xffffffffffffff00;
return ktime_set(ts_s, ts_ns);
}
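// frequency adjustment: the nominal period is read as 32.32 fixed-point ns.fns, scaled by
// scaled_ppm (ppm with 16 fractional bits), and the result is written back as the operating
// period; the 4 ns fallback corresponds to a 250 MHz PTP clock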
static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
bool neg = false;
u64 nom_per_fns, adj;
dev_info(&mdev->pdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm);
if (scaled_ppm < 0)
{
neg = true;
scaled_ppm = -scaled_ppm;
}
nom_per_fns = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_NOM_PERIOD_FNS);
nom_per_fns |= (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_NOM_PERIOD_NS) << 32;
if (nom_per_fns == 0)
nom_per_fns = 0x4ULL << 32;
adj = div_u64(((nom_per_fns >> 16) * scaled_ppm) + 500000, 1000000);
if (neg)
{
adj = nom_per_fns - adj;
}
else
{
adj = nom_per_fns + adj;
}
iowrite32(adj & 0xffffffff, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_PERIOD_FNS);
iowrite32(adj >> 32, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_PERIOD_NS);
dev_info(&mdev->pdev->dev, "mqnic_phc_adjfine adj: 0x%llx", adj);
return 0;
}
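// the (discarded) read of the FNS register latches the current time into the remaining GET
// registers, so it must come first; this is also why gettimex below brackets exactly that read
// with the system timestamps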
static int mqnic_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_FNS);
ts->tv_nsec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_NS);
ts->tv_sec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_L);
ts->tv_sec |= (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;
return 0;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
static int mqnic_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
ptp_read_system_prets(sts);
ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_FNS);
ptp_read_system_postts(sts);
ts->tv_nsec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_NS);
ts->tv_sec = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_L);
ts->tv_sec |= (u64)ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_GET_SEC_H) << 32;
return 0;
}
#endif
static int mqnic_phc_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
iowrite32(0, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_FNS);
iowrite32(ts->tv_nsec, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_NS);
iowrite32(ts->tv_sec & 0xffffffff, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_SEC_L);
iowrite32(ts->tv_sec >> 32, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_SET_SEC_H);
return 0;
}
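// offsets larger than one second are applied by stepping the clock (read, add, write back);
// smaller offsets go through the hardware adjustment registers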
static int mqnic_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct timespec64 ts;
dev_info(&mdev->pdev->dev, "mqnic_phc_adjtime delta: %lld", delta);
if (delta > 1000000000 || delta < -1000000000)
{
mqnic_phc_gettime(ptp, &ts);
ts = timespec64_add(ts, ns_to_timespec64(delta));
mqnic_phc_settime(ptp, &ts);
}
else
{
iowrite32(0, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_ADJ_FNS);
iowrite32(delta & 0xffffffff, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_ADJ_NS);
iowrite32(1, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_ADJ_COUNT);
}
return 0;
}
static int mqnic_phc_perout(struct ptp_clock_info *ptp, int on, struct ptp_perout_request *perout)
{
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
u8 __iomem *hw_addr;
u64 start_sec, period_sec, width_sec;
u32 start_nsec, period_nsec, width_nsec;
if (perout->index >= mdev->ptp_clock_info.n_per_out)
{
return -EINVAL;
}
hw_addr = mdev->phc_hw_addr + 0x60;
if (!on)
{
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_CTRL);
return 0;
}
start_nsec = perout->start.nsec;
start_sec = start_nsec / NSEC_PER_SEC;
start_nsec -= start_sec * NSEC_PER_SEC;
start_sec += perout->start.sec;
period_nsec = perout->period.nsec;
period_sec = period_nsec / NSEC_PER_SEC;
period_nsec -= period_sec * NSEC_PER_SEC;
period_sec += perout->period.sec;
// set width to half of period
width_sec = period_sec >> 1;
width_nsec = (period_nsec + (period_sec & 1 ? NSEC_PER_SEC : 0)) >> 1;
dev_info(&mdev->pdev->dev, "mqnic_phc_perout start: %lld.%09d", start_sec, start_nsec);
dev_info(&mdev->pdev->dev, "mqnic_phc_perout period: %lld.%09d", period_sec, period_nsec);
dev_info(&mdev->pdev->dev, "mqnic_phc_perout width: %lld.%09d", width_sec, width_nsec);
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_START_FNS);
iowrite32(start_nsec, hw_addr+MQNIC_PHC_REG_PEROUT_START_NS);
iowrite32(start_sec & 0xffffffff, hw_addr+MQNIC_PHC_REG_PEROUT_START_SEC_L);
iowrite32(start_sec >> 32, hw_addr+MQNIC_PHC_REG_PEROUT_START_SEC_H);
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_FNS);
iowrite32(period_nsec, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_NS);
iowrite32(period_sec & 0xffffffff, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_SEC_L);
iowrite32(period_sec >> 32, hw_addr+MQNIC_PHC_REG_PEROUT_PERIOD_SEC_H);
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_FNS);
iowrite32(width_nsec, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_NS);
iowrite32(width_sec & 0xffffffff, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_SEC_L);
iowrite32(width_sec >> 32, hw_addr+MQNIC_PHC_REG_PEROUT_WIDTH_SEC_H);
iowrite32(1, hw_addr+MQNIC_PHC_REG_PEROUT_CTRL);
return 0;
}
static int mqnic_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
if (request)
{
switch (request->type)
{
case PTP_CLK_REQ_EXTTS:
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
return mqnic_phc_perout(ptp, on, &request->perout);
case PTP_CLK_REQ_PPS:
return -EINVAL;
default:
return -EINVAL;
}
}
else
{
return -EINVAL;
}
}
void mqnic_phc_set_from_system_clock(struct ptp_clock_info *ptp)
{
struct timespec64 ts;
#ifdef ktime_get_clocktai_ts64
ktime_get_clocktai_ts64(&ts);
#else
ts = ktime_to_timespec64(ktime_get_clocktai());
#endif
mqnic_phc_settime(ptp, &ts);
}
void mqnic_register_phc(struct mqnic_dev *mdev)
{
u32 phc_features;
if (mdev->ptp_clock)
{
return;
}
phc_features = ioread32(mdev->phc_hw_addr+MQNIC_PHC_REG_FEATURES);
mdev->ptp_clock_info.owner = THIS_MODULE;
mdev->ptp_clock_info.max_adj = 10000000;
mdev->ptp_clock_info.n_alarm = 0;
mdev->ptp_clock_info.n_ext_ts = 0;
mdev->ptp_clock_info.n_per_out = phc_features & 0xff;
mdev->ptp_clock_info.n_pins = 0;
mdev->ptp_clock_info.pps = 0;
mdev->ptp_clock_info.adjfine = mqnic_phc_adjfine;
mdev->ptp_clock_info.adjtime = mqnic_phc_adjtime;
mdev->ptp_clock_info.gettime64 = mqnic_phc_gettime;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
mdev->ptp_clock_info.gettimex64 = mqnic_phc_gettimex;
#endif
mdev->ptp_clock_info.settime64 = mqnic_phc_settime;
mdev->ptp_clock_info.enable = mqnic_phc_enable;
mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, &mdev->pdev->dev);
if (IS_ERR(mdev->ptp_clock))
{
mdev->ptp_clock = NULL;
dev_err(&mdev->pdev->dev, "ptp_clock_register failed");
}
else
{
dev_info(&mdev->pdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));
mqnic_phc_set_from_system_clock(&mdev->ptp_clock_info);
}
}
void mqnic_unregister_phc(struct mqnic_dev *mdev)
{
if (mdev->ptp_clock)
{
ptp_clock_unregister(mdev->ptp_clock);
mdev->ptp_clock = NULL;
dev_info(&mdev->pdev->dev, "unregistered PHC");
}
}

386
modules/mqnic/mqnic_rx.c Normal file

@ -0,0 +1,386 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate RX ring");
return -ENOMEM;
}
ring->size = roundup_pow_of_two(size);
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->rx_info = kvzalloc(sizeof(*ring->rx_info)*ring->size, GFP_KERNEL);
if (!ring->rx_info)
{
dev_err(dev, "Failed to allocate rx_info");
ret = -ENOMEM;
goto fail_ring;
}
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate RX ring DMA buffer");
ret = -ENOMEM;
goto fail_info;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
fail_info:
kvfree(ring->rx_info);
ring->rx_info = NULL;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
}
void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_rx_ring(priv, ring);
mqnic_free_rx_buf(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kvfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
}
int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index)
{
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(cpl_index, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
mqnic_refill_rx_buffers(priv, ring);
return 0;
}
void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
}
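// ring pointer convention: head_ptr is the software producer position, tail_ptr mirrors the
// hardware consumer, and clean_tail_ptr trails behind software cleanup; all three are free-running
// counters masked only when used as indices, so occupancy is a simple subtraction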
bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring)
{
return ring->head_ptr == ring->clean_tail_ptr;
}
bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring)
{
return ring->head_ptr - ring->clean_tail_ptr >= ring->size;
}
void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring)
{
ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
}
void mqnic_rx_write_head_ptr(struct mqnic_ring *ring)
{
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
}
void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index)
{
struct mqnic_rx_info *rx_info = &ring->rx_info[index];
dma_unmap_single(priv->dev, rx_info->dma_addr, rx_info->len, DMA_FROM_DEVICE);
rx_info->dma_addr = 0;
napi_consume_skb(rx_info->skb, 0);
rx_info->skb = NULL;
}
int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
u32 index;
int cnt = 0;
while (!mqnic_is_rx_ring_empty(ring))
{
index = ring->clean_tail_ptr & ring->size_mask;
mqnic_free_rx_desc(priv, ring, index);
ring->clean_tail_ptr++;
cnt++;
}
return cnt;
}
int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index)
{
struct mqnic_rx_info *rx_info = &ring->rx_info[index];
struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index * sizeof(*rx_desc));
struct sk_buff *skb = rx_info->skb;
rx_info->len = 2048;
if (skb)
{
// skb has not been processed yet
return -1;
}
skb = __netdev_alloc_skb_ip_align(priv->ndev, 2048, GFP_ATOMIC);
if (!skb)
{
dev_err(&priv->mdev->pdev->dev, "mqnic_prepare_rx_desc failed to allocate skb on port %d", priv->port);
return -1;
}
rx_info->skb = skb;
// map skb
rx_info->dma_addr = dma_map_single(priv->dev, skb->data, rx_info->len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, rx_info->dma_addr)))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_prepare_rx_desc failed to map skb on port %d", priv->port);
napi_consume_skb(rx_info->skb, 0);
rx_info->skb = NULL;
return -1;
}
// write descriptor
rx_desc->len = rx_info->len;
rx_desc->addr = rx_info->dma_addr;
return 0;
}
void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
u32 missing = ring->size - (ring->head_ptr - ring->clean_tail_ptr);
if (missing < 8)
return;
for ( ; missing-- > 0; )
{
if (mqnic_prepare_rx_desc(priv, ring, ring->head_ptr & ring->size_mask))
break;
ring->head_ptr++;
}
// enqueue on NIC
dma_wmb();
mqnic_rx_write_head_ptr(ring);
}
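// RX completion processing: walk the completion queue up to the NAPI budget, attach length,
// timestamp and checksum to each skb and hand it to GRO, then advance the RX ring's clean tail
// past the consumed slots and refill the ring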
bool mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
struct mqnic_rx_info *rx_info;
struct mqnic_cpl *cpl;
struct sk_buff *skb;
u32 cq_index;
u32 cq_tail_ptr;
u32 ring_index;
u32 ring_clean_tail_ptr;
int done = 0;
int budget = napi_budget;
if (unlikely(!priv->port_up))
{
return true;
}
// process completion queue
// read head pointer from NIC
mqnic_cq_read_head_ptr(cq_ring);
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
mb(); // is a barrier here necessary? If so, what kind?
while (cq_ring->head_ptr != cq_tail_ptr && done < budget)
{
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * MQNIC_CPL_SIZE);
ring_index = cpl->index & ring->size_mask;
rx_info = &ring->rx_info[ring_index];
skb = rx_info->skb;
// set length
if (cpl->len <= rx_info->len)
{
skb_put(skb, cpl->len);
}
skb->protocol = eth_type_trans(skb, priv->ndev);
// RX hardware timestamp
skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
skb_record_rx_queue(skb, cq_ring->ring_index);
// RX hardware checksum
if ((ndev->features & NETIF_F_RXCSUM) &&
(skb->protocol == htons(ETH_P_IP) || skb->protocol == htons(ETH_P_IPV6)) &&
(skb->len >= 64)) {
skb->csum = be16_to_cpu(cpl->rx_csum);
skb->ip_summed = CHECKSUM_COMPLETE;
}
// unmap
dma_unmap_single(priv->dev, rx_info->dma_addr, rx_info->len, DMA_FROM_DEVICE);
rx_info->dma_addr = 0;
// hand off SKB
napi_gro_receive(&cq_ring->napi, skb);
rx_info->skb = NULL;
ring->packets++;
ring->bytes += cpl->len;
done++;
cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
}
// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
// process ring
// read tail pointer from NIC
mqnic_rx_read_tail_ptr(ring);
ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
ring_index = ring_clean_tail_ptr & ring->size_mask;
while (ring_clean_tail_ptr != ring->tail_ptr)
{
rx_info = &ring->rx_info[ring_index];
if (rx_info->skb)
break;
ring_clean_tail_ptr++;
ring_index = ring_clean_tail_ptr & ring->size_mask;
}
// update ring tail
WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);
// replenish buffers
mqnic_refill_rx_buffers(priv, ring);
return done < budget;
}
void mqnic_rx_irq(struct mqnic_cq_ring *cq)
{
struct mqnic_priv *priv = netdev_priv(cq->ndev);
if (likely(priv->port_up))
{
napi_schedule_irqoff(&cq->napi);
}
else
{
mqnic_arm_cq(cq);
}
}
int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
{
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct net_device *ndev = cq_ring->ndev;
if (!mqnic_process_rx_cq(ndev, cq_ring, budget))
{
return budget;
}
napi_complete(napi);
mqnic_arm_cq(cq_ring);
return 0;
}

446
modules/mqnic/mqnic_tx.c Normal file

@ -0,0 +1,446 @@
/*
Copyright 2019, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
*/
#include "mqnic.h"
int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
{
dev_err(dev, "Failed to allocate TX ring");
return -ENOMEM;
}
ring->size = roundup_pow_of_two(size);
ring->full_size = ring->size >> 1;
ring->size_mask = ring->size-1;
ring->stride = roundup_pow_of_two(stride);
ring->tx_info = kvzalloc(sizeof(*ring->tx_info)*ring->size, GFP_KERNEL);
if (!ring->tx_info)
{
dev_err(dev, "Failed to allocate tx_info");
ret = -ENOMEM;
goto fail_ring;
}
ring->buf_size = ring->size*ring->stride;
ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
if (!ring->buf)
{
dev_err(dev, "Failed to allocate TX ring DMA buffer");
ret = -ENOMEM;
goto fail_info;
}
ring->hw_addr = hw_addr;
ring->hw_ptr_mask = 0xffff;
ring->hw_head_ptr = hw_addr+MQNIC_QUEUE_HEAD_PTR_REG;
ring->hw_tail_ptr = hw_addr+MQNIC_QUEUE_TAIL_PTR_REG;
ring->head_ptr = 0;
ring->tail_ptr = 0;
ring->clean_tail_ptr = 0;
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
*ring_ptr = ring;
return 0;
fail_info:
kvfree(ring->tx_info);
ring->tx_info = NULL;
fail_ring:
kfree(ring);
*ring_ptr = NULL;
return ret;
}
void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr)
{
struct device *dev = priv->dev;
struct mqnic_ring *ring = *ring_ptr;
*ring_ptr = NULL;
mqnic_deactivate_tx_ring(priv, ring);
mqnic_free_tx_buf(priv, ring);
dma_free_coherent(dev, ring->buf_size, ring->buf, ring->buf_dma_addr);
kvfree(ring->tx_info);
ring->tx_info = NULL;
kfree(ring);
}
int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index)
{
// deactivate queue
iowrite32(0, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
// set base address
iowrite32(ring->buf_dma_addr, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+0);
iowrite32(ring->buf_dma_addr >> 32, ring->hw_addr+MQNIC_QUEUE_BASE_ADDR_REG+4);
// set completion queue index
iowrite32(cpl_index, ring->hw_addr+MQNIC_QUEUE_CPL_QUEUE_INDEX_REG);
// set pointers
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_HEAD_PTR_REG);
iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_QUEUE_TAIL_PTR_REG);
// set size and activate queue
iowrite32(ilog2(ring->size) | MQNIC_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
return 0;
}
void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
// deactivate queue
iowrite32(ilog2(ring->size), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
}
bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring)
{
return ring->head_ptr == ring->clean_tail_ptr;
}
bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring)
{
return ring->head_ptr - ring->clean_tail_ptr >= ring->full_size;
}
void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring)
{
ring->tail_ptr += (ioread32(ring->hw_tail_ptr) - ring->tail_ptr) & ring->hw_ptr_mask;
}
void mqnic_tx_write_head_ptr(struct mqnic_ring *ring)
{
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
}
void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index, int napi_budget)
{
struct mqnic_tx_info *tx_info = &ring->tx_info[index];
dma_unmap_single(priv->dev, tx_info->dma_addr, tx_info->len, DMA_TO_DEVICE);
tx_info->dma_addr = 0;
napi_consume_skb(tx_info->skb, napi_budget);
tx_info->skb = NULL;
}
int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
{
u32 index;
int cnt = 0;
while (!mqnic_is_tx_ring_empty(ring))
{
index = ring->clean_tail_ptr & ring->size_mask;
mqnic_free_tx_desc(priv, ring, index, 0);
ring->clean_tail_ptr++;
cnt++;
}
return cnt;
}
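// TX completion processing: walk the completion queue up to the NAPI budget, report hardware
// timestamps where requested and free the completed descriptors, then advance the TX ring's
// clean tail and wake the queue if it was stopped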
bool mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget)
{
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring = priv->tx_ring[cq_ring->ring_index];
struct mqnic_tx_info *tx_info;
struct mqnic_cpl *cpl;
u32 cq_index;
u32 cq_tail_ptr;
u32 ring_index;
u32 ring_clean_tail_ptr;
u32 packets = 0;
u32 bytes = 0;
int done = 0;
int budget = napi_budget;
if (unlikely(!priv->port_up))
{
return true;
}
// prefetch for BQL
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
// process completion queue
// read head pointer from NIC
mqnic_cq_read_head_ptr(cq_ring);
cq_tail_ptr = cq_ring->tail_ptr;
cq_index = cq_tail_ptr & cq_ring->size_mask;
while (cq_ring->head_ptr != cq_tail_ptr && done < budget)
{
cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * MQNIC_CPL_SIZE);
ring_index = cpl->index & ring->size_mask;
tx_info = &ring->tx_info[ring_index];
// TX hardware timestamp
if (unlikely(tx_info->ts_requested))
{
struct skb_shared_hwtstamps hwts;
dev_info(&priv->mdev->pdev->dev, "mqnic_process_tx_cq TX TS requested");
hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
skb_tstamp_tx(tx_info->skb, &hwts);
}
// free TX descriptor
mqnic_free_tx_desc(priv, ring, ring_index, napi_budget);
packets++;
bytes += cpl->len;
done++;
cq_tail_ptr++;
cq_index = cq_tail_ptr & cq_ring->size_mask;
}
// update CQ tail
cq_ring->tail_ptr = cq_tail_ptr;
mqnic_cq_write_tail_ptr(cq_ring);
// process ring
// read tail pointer from NIC
mqnic_tx_read_tail_ptr(ring);
ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
ring_index = ring_clean_tail_ptr & ring->size_mask;
while (ring_clean_tail_ptr != ring->tail_ptr)
{
tx_info = &ring->tx_info[ring_index];
if (tx_info->skb)
break;
ring_clean_tail_ptr++;
ring_index = ring_clean_tail_ptr & ring->size_mask;
}
// update ring tail
WRITE_ONCE(ring->clean_tail_ptr, ring_clean_tail_ptr);
// BQL
//netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
// wake queue if it is stopped
if (netif_tx_queue_stopped(ring->tx_queue) && !mqnic_is_tx_ring_full(ring))
{
netif_tx_wake_queue(ring->tx_queue);
}
return done < budget;
}
void mqnic_tx_irq(struct mqnic_cq_ring *cq)
{
struct mqnic_priv *priv = netdev_priv(cq->ndev);
if (likely(priv->port_up))
{
napi_schedule_irqoff(&cq->napi);
}
else
{
mqnic_arm_cq(cq);
}
}
int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
{
struct mqnic_cq_ring *cq_ring = container_of(napi, struct mqnic_cq_ring, napi);
struct net_device *ndev = cq_ring->ndev;
if (!mqnic_process_tx_cq(ndev, cq_ring, budget))
{
return budget;
}
napi_complete(napi);
mqnic_arm_cq(cq_ring);
return 0;
}
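// transmit path: select the ring from the skb queue mapping, fill in one descriptor per packet,
// map the payload for DMA and ring the doorbell, stopping the queue once it fills past the
// half-full threshold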
netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mqnic_priv *priv = netdev_priv(ndev);
struct mqnic_ring *ring;
struct mqnic_tx_info *tx_info;
struct mqnic_desc *tx_desc;
int ring_index;
u32 index;
bool stop_queue;
u32 clean_tail_ptr;
if (unlikely(!priv->port_up))
{
goto tx_drop;
}
ring_index = skb_get_queue_mapping(skb);
if (unlikely(ring_index >= priv->tx_queue_count))
{
// queue mapping out of range
goto tx_drop;
}
ring = priv->tx_ring[ring_index];
clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
// prefetch for BQL
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
index = ring->head_ptr & ring->size_mask;
tx_desc = (struct mqnic_desc *)(ring->buf + index * sizeof(*tx_desc));
tx_info = &ring->tx_info[index];
tx_info->skb = skb;
tx_info->len = skb->len;
// TX hardware timestamp
tx_info->ts_requested = 0;
if (unlikely(shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
dev_info(&priv->mdev->pdev->dev, "mqnic_start_xmit TX TS requested");
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
tx_info->ts_requested = 1;
}
// TX hardware checksum
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned int csum_start_off = skb_checksum_start_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
dev_info(&priv->mdev->pdev->dev, "mqnic_start_xmit Hardware checksum requested start %d offset %d", csum_start_off, csum_index_off);
if (1 || csum_start_off > 127 || csum_index_off > 255 || csum_start_off & 1 || csum_index_off & 1)
{
// offset out of range, fall back on software checksum
if (skb_checksum_help(skb))
{
// software checksumming failed
goto tx_drop_count;
}
tx_desc->tx_csum_cmd = 0;
}
else
{
tx_desc->tx_csum_cmd = 0x8000 | (csum_start_off << 8) | (csum_index_off);
}
}
ring->packets++;
ring->bytes += tx_info->len;
// map skb (linear data only: NETIF_F_SG is not advertised, so the stack hands this driver linear skbs)
tx_info->dma_addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, tx_info->dma_addr)))
{
// mapping failed
goto tx_drop_count;
}
// write descriptor
tx_desc->len = tx_info->len;
tx_desc->addr = tx_info->dma_addr;
// enqueue
ring->head_ptr++;
skb_tx_timestamp(skb);
stop_queue = mqnic_is_tx_ring_full(ring);
if (unlikely(stop_queue))
{
dev_info(&priv->mdev->pdev->dev, "mqnic_start_xmit TX ring full on port %d", priv->port);
netif_tx_stop_queue(ring->tx_queue);
}
// BQL
//netdev_tx_sent_queue(ring->tx_queue, tx_info->len);
//__netdev_tx_sent_queue(ring->tx_queue, tx_info->len, skb->xmit_more);
// enqueue on NIC: ring the doorbell only when no further packets are pending (xmit_more clear) or the queue is being stopped
if (unlikely(!skb->xmit_more || stop_queue))
{
dma_wmb();
mqnic_tx_write_head_ptr(ring);
}
// check if queue restarted
if (unlikely(stop_queue))
{
smp_rmb();
clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
if (unlikely(!mqnic_is_tx_ring_full(ring)))
{
netif_tx_wake_queue(ring->tx_queue);
}
}
return NETDEV_TX_OK;
tx_drop_count:
ring->dropped_packets++;
tx_drop:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}