Mirror of https://github.com/corundum/corundum.git, synced 2025-01-30 08:32:52 +08:00

Rename mqnic_priv.port to mqnic_priv.index

Alex Forencich 2021-12-10 21:01:51 -08:00
parent c739b05b69
commit ed36f169f9
5 changed files with 21 additions and 21 deletions
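
The per-interface identifier in struct mqnic_priv is renamed from port to index, along with the matching mqnic_init_netdev() parameter; every log message and every mdev->ndev[] / mdev->mac_list[] lookup that used priv->port now uses priv->index. A condensed before/after sketch of the affected declarations (unrelated members elided):

/* before */
struct mqnic_priv {
	/* ... */
	bool registered;
	int port;	/* indexes mdev->ndev[] and mdev->mac_list[] */
	bool port_up;
	/* ... */
};
int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr);

/* after */
struct mqnic_priv {
	/* ... */
	int index;	/* same value, renamed for clarity */
	bool registered;
	bool port_up;
	/* ... */
};
int mqnic_init_netdev(struct mqnic_dev *mdev, int index, u8 __iomem *hw_addr);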


@@ -287,8 +287,8 @@ struct mqnic_priv {
 	spinlock_t stats_lock;
+	int index;
 	bool registered;
-	int port;
 	bool port_up;
 	u32 if_id;
@@ -331,7 +331,7 @@ extern const struct file_operations mqnic_fops;
 // mqnic_netdev.c
 void mqnic_update_stats(struct net_device *ndev);
-int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr);
+int mqnic_init_netdev(struct mqnic_dev *mdev, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_netdev(struct net_device *ndev);
 // mqnic_port.c


@@ -187,7 +187,7 @@ void mqnic_process_eq(struct mqnic_eq_ring *eq_ring)
 		// transmit completion event
 		if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) {
 			dev_err(priv->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
-					__func__, priv->port, le16_to_cpu(event->source), eq_index,
+					__func__, priv->index, le16_to_cpu(event->source), eq_index,
 					le16_to_cpu(event->type));
 			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
 					event, MQNIC_EVENT_SIZE, true);
@@ -200,7 +200,7 @@ void mqnic_process_eq(struct mqnic_eq_ring *eq_ring)
 		// receive completion event
 		if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) {
 			dev_err(priv->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
-					__func__, priv->port, le16_to_cpu(event->source), eq_index,
+					__func__, priv->index, le16_to_cpu(event->source), eq_index,
 					le16_to_cpu(event->type));
 			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
 					event, MQNIC_EVENT_SIZE, true);
@@ -211,7 +211,7 @@ void mqnic_process_eq(struct mqnic_eq_ring *eq_ring)
 		}
 	} else {
 		dev_err(priv->dev, "%s on port %d: unknown event type %d (index %d, source %d)",
-				__func__, priv->port, le16_to_cpu(event->type), eq_index,
+				__func__, priv->index, le16_to_cpu(event->type), eq_index,
 				le16_to_cpu(event->source));
 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
 				event, MQNIC_EVENT_SIZE, true);


@@ -41,7 +41,7 @@ static int mqnic_start_port(struct net_device *ndev)
 	struct mqnic_dev *mdev = priv->mdev;
 	int k;
-	dev_info(mdev->dev, "%s on port %d", __func__, priv->port);
+	dev_info(mdev->dev, "%s on port %d", __func__, priv->index);
 	// set up event queues
 	for (k = 0; k < priv->event_queue_count; k++) {
@@ -119,7 +119,7 @@ static int mqnic_stop_port(struct net_device *ndev)
 	struct mqnic_dev *mdev = priv->mdev;
 	int k;
-	dev_info(mdev->dev, "%s on port %d", __func__, priv->port);
+	dev_info(mdev->dev, "%s on port %d", __func__, priv->index);
 	netif_tx_lock_bh(ndev);
 	// if (detach)
@@ -191,7 +191,7 @@ static int mqnic_open(struct net_device *ndev)
 	ret = mqnic_start_port(ndev);
 	if (ret)
-		dev_err(mdev->dev, "Failed to start port: %d", priv->port);
+		dev_err(mdev->dev, "Failed to start port: %d", priv->index);
 	mutex_unlock(&mdev->state_lock);
 	return ret;
@@ -208,7 +208,7 @@ static int mqnic_close(struct net_device *ndev)
 	ret = mqnic_stop_port(ndev);
 	if (ret)
-		dev_err(mdev->dev, "Failed to stop port: %d", priv->port);
+		dev_err(mdev->dev, "Failed to stop port: %d", priv->index);
 	mutex_unlock(&mdev->state_lock);
 	return ret;
@@ -366,7 +366,7 @@ static const struct net_device_ops mqnic_netdev_ops = {
 	.ndo_do_ioctl = mqnic_ioctl,
 };
-int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
+int mqnic_init_netdev(struct mqnic_dev *mdev, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = mdev->dev;
 	struct net_device *ndev;
@@ -382,7 +382,7 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	}
 	SET_NETDEV_DEV(ndev, dev);
-	ndev->dev_port = port;
+	ndev->dev_port = index;
 	// init private data
 	priv = netdev_priv(ndev);
@@ -393,7 +393,7 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	priv->ndev = ndev;
 	priv->mdev = mdev;
 	priv->dev = dev;
-	priv->port = port;
+	priv->index = index;
 	priv->port_up = false;
 	priv->hw_addr = hw_addr;
@@ -452,11 +452,11 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	// set MAC
 	ndev->addr_len = ETH_ALEN;
-	if (port >= mdev->mac_count) {
+	if (index >= mdev->mac_count) {
 		dev_warn(dev, "Exhausted permanent MAC addresses; using random MAC");
 		eth_hw_addr_random(ndev);
 	} else {
-		memcpy(ndev->dev_addr, mdev->mac_list[port], ETH_ALEN);
+		memcpy(ndev->dev_addr, mdev->mac_list[index], ETH_ALEN);
 		if (!is_valid_ether_addr(ndev->dev_addr)) {
 			dev_warn(dev, "Invalid MAC address in list; using random MAC");
@@ -550,13 +550,13 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	ret = register_netdev(ndev);
 	if (ret) {
-		dev_err(dev, "netdev registration failed on port %d", port);
+		dev_err(dev, "netdev registration failed on port %d", index);
 		goto fail;
 	}
 	priv->registered = 1;
-	mdev->ndev[port] = ndev;
+	mdev->ndev[index] = ndev;
 	return 0;
@@ -574,7 +574,7 @@ void mqnic_destroy_netdev(struct net_device *ndev)
 	if (priv->registered)
 		unregister_netdev(ndev);
-	mdev->ndev[priv->port] = NULL;
+	mdev->ndev[priv->index] = NULL;
 	// free rings
 	for (k = 0; k < ARRAY_SIZE(priv->event_ring); k++)


@@ -212,14 +212,14 @@ int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index)
 	if (unlikely(page)) {
 		dev_err(ring->dev, "%s: skb not yet processed on port %d",
-				__func__, ring->priv->port);
+				__func__, ring->priv->index);
 		return -1;
 	}
 	page = dev_alloc_pages(page_order);
 	if (unlikely(!page)) {
 		dev_err(ring->dev, "%s: failed to allocate memory on port %d",
-				__func__, ring->priv->port);
+				__func__, ring->priv->index);
 		return -1;
 	}
@@ -228,7 +228,7 @@ int mqnic_prepare_rx_desc(struct mqnic_ring *ring, int index)
 	if (unlikely(dma_mapping_error(ring->dev, dma_addr))) {
 		dev_err(ring->dev, "%s: DMA mapping failed on port %d",
-				__func__, ring->priv->port);
+				__func__, ring->priv->index);
 		__free_pages(page, page_order);
 		return -1;
 	}


@@ -479,7 +479,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	stop_queue = mqnic_is_tx_ring_full(ring);
 	if (unlikely(stop_queue)) {
 		dev_info(priv->dev, "%s: TX ring %d full on port %d",
-				__func__, ring_index, priv->port);
+				__func__, ring_index, priv->index);
 		netif_tx_stop_queue(ring->tx_queue);
 	}