mirror of https://github.com/corundum/corundum.git (synced 2025-01-16 08:12:53 +08:00)

Add device object reference in mqnic_dev and clean up references to device object

This commit is contained in:
Alex Forencich 2020-07-30 19:37:34 -07:00
parent e7bcb726b4
commit e60e3a993f
10 changed files with 42 additions and 52 deletions

View File

@ -66,6 +66,7 @@ struct mqnic_i2c_priv
};
struct mqnic_dev {
struct device *dev;
struct pci_dev *pdev;
size_t hw_regs_size;

View File

@ -58,13 +58,12 @@ static int mqnic_release(struct inode *inode, struct file *filp)
static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *vma)
{
struct device *dev = &mqnic->pdev->dev;
size_t map_size = vma->vm_end - vma->vm_start;
int ret;
if (map_size > mqnic->hw_regs_size)
{
dev_err(dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <=%zu)", vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <=%zu)", vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
return -EINVAL;
}
@ -72,11 +71,11 @@ static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *v
if (ret)
{
dev_err(dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
dev_err(mqnic->dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
}
else
{
dev_dbg(dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p", &mqnic->hw_regs_phys, (void *)vma->vm_start);
dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p", &mqnic->hw_regs_phys, (void *)vma->vm_start);
}
return ret;
@ -85,7 +84,6 @@ static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *v
static int mqnic_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct mqnic_dev *mqnic = filp->private_data;
struct device *dev = &mqnic->pdev->dev;
int ret;
if (vma->vm_pgoff == 0)
@ -100,7 +98,7 @@ static int mqnic_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
fail_invalid_offset:
dev_err(dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu", vma->vm_pgoff);
dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu", vma->vm_pgoff);
return -EINVAL;
}

View File

@ -184,7 +184,7 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
// transmit completion event
if (unlikely(event->source > priv->tx_cpl_queue_count))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, event->source, eq_index, event->type);
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, event->source, eq_index, event->type);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
}
else
@ -201,7 +201,7 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
// receive completion event
if (unlikely(event->source > priv->rx_cpl_queue_count))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, event->source, eq_index, event->type);
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, event->source, eq_index, event->type);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
}
else
@ -215,7 +215,7 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
}
else
{
dev_err(&priv->mdev->pdev->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)", priv->port, event->type, eq_index, event->source);
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)", priv->port, event->type, eq_index, event->source);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
}

View File

@ -42,7 +42,7 @@ static void mqnic_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *d
strlcpy(drvinfo->version, DRIVER_VERSION, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d", mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
strlcpy(drvinfo->bus_info, pci_name(mdev->pdev), sizeof(drvinfo->bus_info));
strlcpy(drvinfo->bus_info, dev_name(mdev->dev), sizeof(drvinfo->bus_info));
}
static int mqnic_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)

View File

@ -112,7 +112,7 @@ int mqnic_init_i2c(struct mqnic_dev *mqnic)
mqnic->eeprom_i2c_algo = mqnic_i2c_algo;
mqnic->eeprom_i2c_algo.data = &mqnic->eeprom_i2c_priv;
mqnic->eeprom_i2c_adap.algo_data = &mqnic->eeprom_i2c_algo;
mqnic->eeprom_i2c_adap.dev.parent = &mqnic->pdev->dev;
mqnic->eeprom_i2c_adap.dev.parent = mqnic->dev;
iowrite32(ioread32(mqnic->hw_addr+MQNIC_REG_GPIO_OUT) & ~(1 << 26), mqnic->hw_addr+MQNIC_REG_GPIO_OUT); // WP disable
strlcpy(mqnic->eeprom_i2c_adap.name, "mqnic EEPROM", sizeof(mqnic->eeprom_i2c_adap.name));
ret = i2c_bit_add_bus(&mqnic->eeprom_i2c_adap);

View File

@ -146,6 +146,7 @@ static int mqnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
}
mqnic->dev = dev;
mqnic->pdev = pdev;
pci_set_drvdata(pdev, mqnic);
@ -356,16 +357,11 @@ fail_enable_device:
static void mqnic_remove(struct pci_dev *pdev)
{
struct mqnic_dev *mqnic;
struct device *dev = &pdev->dev;
struct mqnic_dev *mqnic = pci_get_drvdata(pdev);
int k = 0;
dev_info(dev, "mqnic remove");
if (!(mqnic = pci_get_drvdata(pdev))) {
return;
}
dev_info(&pdev->dev, "mqnic remove");
misc_deregister(&mqnic->misc_dev);
@ -398,13 +394,8 @@ static void mqnic_remove(struct pci_dev *pdev)
static void mqnic_shutdown(struct pci_dev *pdev)
{
struct mqnic_dev *mqnic = pci_get_drvdata(pdev);
struct device *dev = &pdev->dev;
dev_info(dev, "mqnic shutdown");
if (!mqnic) {
return;
}
dev_info(&pdev->dev, "mqnic shutdown");
mqnic_remove(pdev);
}

View File

@ -39,7 +39,7 @@ static int mqnic_start_port(struct net_device *ndev)
struct mqnic_dev *mdev = priv->mdev;
int k;
dev_info(&mdev->pdev->dev, "mqnic_open on port %d", priv->port);
dev_info(mdev->dev, "mqnic_start_port on port %d", priv->port);
// set up event queues
for (k = 0; k < priv->event_queue_count; k++)
@ -121,7 +121,7 @@ static int mqnic_stop_port(struct net_device *ndev)
struct mqnic_dev *mdev = priv->mdev;
int k;
dev_info(&mdev->pdev->dev, "mqnic_close on port %d", priv->port);
dev_info(mdev->dev, "mqnic_stop_port on port %d", priv->port);
netif_tx_lock_bh(ndev);
// if (detach)
@ -208,7 +208,7 @@ static int mqnic_open(struct net_device *ndev)
if (ret)
{
dev_err(&mdev->pdev->dev, "Failed to start port: %d", priv->port);
dev_err(mdev->dev, "Failed to start port: %d", priv->port);
}
mutex_unlock(&mdev->state_lock);
@ -227,7 +227,7 @@ static int mqnic_close(struct net_device *ndev)
if (ret)
{
dev_err(&mdev->pdev->dev, "Failed to stop port: %d", priv->port);
dev_err(mdev->dev, "Failed to stop port: %d", priv->port);
}
mutex_unlock(&mdev->state_lock);
@ -358,11 +358,11 @@ static int mqnic_change_mtu(struct net_device *ndev, int new_mtu)
if (new_mtu < ndev->min_mtu || new_mtu > ndev->max_mtu)
{
dev_err(&mdev->pdev->dev, "Bad MTU: %d", new_mtu);
dev_err(mdev->dev, "Bad MTU: %d", new_mtu);
return -EPERM;
}
dev_info(&mdev->pdev->dev, "New MTU: %d", new_mtu);
dev_info(mdev->dev, "New MTU: %d", new_mtu);
ndev->mtu = new_mtu;
@ -403,7 +403,7 @@ static const struct net_device_ops mqnic_netdev_ops = {
int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
{
struct device *dev = &mdev->pdev->dev;
struct device *dev = mdev->dev;
struct net_device *ndev;
struct mqnic_priv *priv;
int ret = 0;
@ -416,7 +416,7 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
return -ENOMEM;
}
SET_NETDEV_DEV(ndev, &mdev->pdev->dev);
SET_NETDEV_DEV(ndev, dev);
ndev->dev_port = port;
// init private data

View File

@ -59,7 +59,7 @@ static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
bool neg = false;
u64 nom_per_fns, adj;
dev_info(&mdev->pdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm);
dev_info(mdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm);
if (scaled_ppm < 0)
{
@ -87,7 +87,7 @@ static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
iowrite32(adj & 0xffffffff, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_PERIOD_FNS);
iowrite32(adj >> 32, mdev->phc_hw_addr+MQNIC_PHC_REG_PTP_PERIOD_NS);
dev_info(&mdev->pdev->dev, "mqnic_phc_adjfine adj: 0x%llx", adj);
dev_info(mdev->dev, "mqnic_phc_adjfine adj: 0x%llx", adj);
return 0;
}
@ -137,7 +137,7 @@ static int mqnic_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct timespec64 ts;
dev_info(&mdev->pdev->dev, "mqnic_phc_adjtime delta: %lld", delta);
dev_info(mdev->dev, "mqnic_phc_adjtime delta: %lld", delta);
if (delta > 1000000000 || delta < -1000000000)
{
@ -191,9 +191,9 @@ static int mqnic_phc_perout(struct ptp_clock_info *ptp, int on, struct ptp_perou
width_sec = period_sec >> 1;
width_nsec = (period_nsec + (period_sec & 1 ? NSEC_PER_SEC : 0)) >> 1;
dev_info(&mdev->pdev->dev, "mqnic_phc_perout start: %lld.%09d", start_sec, start_nsec);
dev_info(&mdev->pdev->dev, "mqnic_phc_perout period: %lld.%09d", period_sec, period_nsec);
dev_info(&mdev->pdev->dev, "mqnic_phc_perout width: %lld.%09d", width_sec, width_nsec);
dev_info(mdev->dev, "mqnic_phc_perout start: %lld.%09d", start_sec, start_nsec);
dev_info(mdev->dev, "mqnic_phc_perout period: %lld.%09d", period_sec, period_nsec);
dev_info(mdev->dev, "mqnic_phc_perout width: %lld.%09d", width_sec, width_nsec);
iowrite32(0, hw_addr+MQNIC_PHC_REG_PEROUT_START_FNS);
iowrite32(start_nsec, hw_addr+MQNIC_PHC_REG_PEROUT_START_NS);
@ -277,16 +277,16 @@ void mqnic_register_phc(struct mqnic_dev *mdev)
mdev->ptp_clock_info.settime64 = mqnic_phc_settime,
mdev->ptp_clock_info.enable = mqnic_phc_enable,
mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, &mdev->pdev->dev);
mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, mdev->dev);
if (IS_ERR(mdev->ptp_clock))
{
mdev->ptp_clock = NULL;
dev_err(&mdev->pdev->dev, "ptp_clock_register failed");
dev_err(mdev->dev, "ptp_clock_register failed");
}
else
{
dev_info(&mdev->pdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));
dev_info(mdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));
mqnic_phc_set_from_system_clock(&mdev->ptp_clock_info);
}
@ -298,7 +298,7 @@ void mqnic_unregister_phc(struct mqnic_dev *mdev)
{
ptp_clock_unregister(mdev->ptp_clock);
mdev->ptp_clock = NULL;
dev_info(&mdev->pdev->dev, "unregistered PHC");
dev_info(mdev->dev, "unregistered PHC");
}
}

View File

@ -209,14 +209,14 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int
if (unlikely(page))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d", priv->port);
dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d", priv->port);
return -1;
}
page = dev_alloc_pages(page_order);
if (unlikely(!page))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d", priv->port);
dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d", priv->port);
return -1;
}
@ -225,7 +225,7 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int
if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d", priv->port);
dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d", priv->port);
__free_pages(page, page_order);
return -1;
}
@ -302,7 +302,7 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
if (unlikely(!page))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_process_rx_cq ring %d null page at index %d", cq_ring->ring_index, ring_index);
dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d", cq_ring->ring_index, ring_index);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, cpl, MQNIC_CPL_SIZE, true);
break;
}
@ -310,7 +310,7 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
skb = napi_get_frags(&cq_ring->napi);
if (unlikely(!skb))
{
dev_err(&priv->mdev->pdev->dev, "mqnic_process_rx_cq ring %d failed to allocate skb", cq_ring->ring_index);
dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb", cq_ring->ring_index);
break;
}

View File

@ -248,7 +248,7 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
if (unlikely(tx_info->ts_requested))
{
struct skb_shared_hwtstamps hwts;
dev_info(&priv->mdev->pdev->dev, "mqnic_process_tx_cq TX TS requested");
dev_info(priv->dev, "mqnic_process_tx_cq TX TS requested");
hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
skb_tstamp_tx(tx_info->skb, &hwts);
}
@ -395,7 +395,7 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring, stru
return true;
map_error:
dev_err(&priv->mdev->pdev->dev, "mqnic_map_skb DMA mapping failed");
dev_err(priv->dev, "mqnic_map_skb DMA mapping failed");
// unmap frags
for (i = 0; i < tx_info->frag_count; i++)
@ -451,7 +451,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
// TX hardware timestamp
tx_info->ts_requested = 0;
if (unlikely(priv->if_features & MQNIC_IF_FEATURE_PTP_TS && shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
dev_info(&priv->mdev->pdev->dev, "mqnic_start_xmit TX TS requested");
dev_info(priv->dev, "mqnic_start_xmit TX TS requested");
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
tx_info->ts_requested = 1;
}
@ -463,7 +463,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (csum_start > 255 || csum_offset > 127)
{
dev_info(&priv->mdev->pdev->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d", csum_start, csum_offset);
dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d", csum_start, csum_offset);
// offset out of range, fall back on software checksum
if (skb_checksum_help(skb))
@ -511,7 +511,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
stop_queue = mqnic_is_tx_ring_full(ring);
if (unlikely(stop_queue))
{
dev_info(&priv->mdev->pdev->dev, "mqnic_start_xmit TX ring %d full on port %d", ring_index, priv->port);
dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d", ring_index, priv->port);
netif_tx_stop_queue(ring->tx_queue);
}