Fix kernel module coding style
commit 5b49f09baa
parent 1bce5827c9
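The diff below is purely mechanical restyling: Allman braces become K&R braces on the statement line, braces around single-statement bodies are dropped, over-long calls and prototypes are wrapped with continuation indentation, and the copyright years are extended to 2019-2021. A condensed before/after sketch of the conventions being applied, assembled from fragments of the hunks below:

/* Before: Allman braces, one over-long call */
if (mqnic_i2c_init(mqnic))
{
    dev_err(mqnic->dev, "Failed to initialize I2C subsystem");
    return -1;
}
iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);

/* After: kernel style - brace on the statement line, wrapped arguments */
if (mqnic_i2c_init(mqnic)) {
    dev_err(mqnic->dev, "Failed to initialize I2C subsystem");
    return -1;
}
iowrite32(ring->head_ptr & ring->hw_ptr_mask,
        ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
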
@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -57,8 +57,7 @@ struct mqnic_board_ops {
     void (*deinit)(struct mqnic_dev *mqnic);
 };
 
-struct mqnic_i2c_bus
-{
+struct mqnic_i2c_bus {
     struct mqnic_dev *mqnic;
 
     u8 __iomem *scl_in_reg;
@@ -327,7 +326,8 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr);
 void mqnic_destroy_netdev(struct net_device *ndev);
 
 // mqnic_port.c
-int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, int index, u8 __iomem *hw_addr);
+int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
+        int index, u8 __iomem *hw_addr);
 void mqnic_destroy_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr);
 int mqnic_activate_port(struct mqnic_port *port);
 void mqnic_deactivate_port(struct mqnic_port *port);
@@ -341,7 +341,8 @@ void mqnic_port_set_rx_mtu(struct mqnic_port *port, u32 mtu);
 // mqnic_ptp.c
 void mqnic_register_phc(struct mqnic_dev *mdev);
 void mqnic_unregister_phc(struct mqnic_dev *mdev);
-ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring, const struct mqnic_cpl *cpl);
+ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
+        const struct mqnic_cpl *cpl);
 
 // mqnic_i2c.c
 struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *reg);
@@ -356,9 +357,11 @@ int mqnic_board_init(struct mqnic_dev *mqnic);
 void mqnic_board_deinit(struct mqnic_dev *mqnic);
 
 // mqnic_eq.c
-int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
+        int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr);
-int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index);
+int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
+        int int_index);
 void mqnic_deactivate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_empty(const struct mqnic_eq_ring *ring);
 bool mqnic_is_eq_ring_full(const struct mqnic_eq_ring *ring);
@@ -368,9 +371,11 @@ void mqnic_arm_eq(struct mqnic_eq_ring *ring);
 void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring);
 
 // mqnic_cq.c
-int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
+        int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr);
-int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring, int eq_index);
+int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
+        int eq_index);
 void mqnic_deactivate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_empty(const struct mqnic_cq_ring *ring);
 bool mqnic_is_cq_ring_full(const struct mqnic_cq_ring *ring);
@@ -379,35 +384,44 @@ void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring);
 void mqnic_arm_cq(struct mqnic_cq_ring *ring);
 
 // mqnic_tx.c
-int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
+        int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
-int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
+int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
+        int cpl_index);
 void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_tx_ring_full(const struct mqnic_ring *ring);
 void mqnic_tx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_tx_write_head_ptr(struct mqnic_ring *ring);
-void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index, int napi_budget);
+void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+        int index, int napi_budget);
 int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
+int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
+        int napi_budget);
 void mqnic_tx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_tx_cq(struct napi_struct *napi, int budget);
 netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *dev);
 
 // mqnic_rx.c
-int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr);
+int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
+        int size, int stride, int index, u8 __iomem *hw_addr);
 void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr);
-int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index);
+int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
+        int cpl_index);
 void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring);
 bool mqnic_is_rx_ring_full(const struct mqnic_ring *ring);
 void mqnic_rx_read_tail_ptr(struct mqnic_ring *ring);
 void mqnic_rx_write_head_ptr(struct mqnic_ring *ring);
-void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
+void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+        int index);
 int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index);
+int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+        int index);
 void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring);
-int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget);
+int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
+        int napi_budget);
 void mqnic_rx_irq(struct mqnic_cq_ring *cq);
 int mqnic_poll_rx_cq(struct napi_struct *napi, int budget);
 

@@ -42,7 +42,8 @@ static const struct property_entry i2c_mux_props[] = {
     {}
 };
 
-static struct i2c_client *create_i2c_client(struct i2c_adapter *adapter, const char *type, int addr, const struct property_entry *props)
+static struct i2c_client *create_i2c_client(struct i2c_adapter *adapter,
+        const char *type, int addr, const struct property_entry *props)
 {
     struct i2c_client *client;
     struct i2c_board_info board_info;
@@ -57,8 +58,7 @@ static struct i2c_client *create_i2c_client(struct i2c_adapter *adapter, const c
     memset(&board_info, 0, sizeof(board_info));
     strscpy(board_info.type, type, I2C_NAME_SIZE);
     board_info.addr = addr;
-    if (props)
-    {
+    if (props) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
         memset(&sw_node, 0, sizeof(sw_node));
         sw_node.properties = props;
@@ -67,7 +67,6 @@ static struct i2c_client *create_i2c_client(struct i2c_adapter *adapter, const c
         board_info.properties = props;
 #endif
     }
-
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
     client = i2c_new_client_device(adapter, &board_info);
 #else
@@ -110,15 +109,13 @@ static int init_mac_list_from_base_mac(struct mqnic_dev *mqnic, int count, char
 
     count = min(count, MQNIC_MAX_IF);
 
-    if (!is_valid_ether_addr(mac))
-    {
+    if (!is_valid_ether_addr(mac)) {
         dev_warn(mqnic->dev, "Base MAC is not valid");
         return -1;
     }
 
     mqnic->mac_count = count;
-    for (k = 0; k < mqnic->mac_count; k++)
-    {
+    for (k = 0; k < mqnic->mac_count; k++) {
         memcpy(mqnic->mac_list[k], mac, ETH_ALEN);
         mqnic->mac_list[k][ETH_ALEN - 1] += k;
     }
@@ -126,19 +123,18 @@ static int init_mac_list_from_base_mac(struct mqnic_dev *mqnic, int count, char
     return count;
 }
 
-static int read_mac_from_eeprom(struct mqnic_dev *mqnic, struct i2c_client *eeprom, int offset, char *mac)
+static int read_mac_from_eeprom(struct mqnic_dev *mqnic,
+        struct i2c_client *eeprom, int offset, char *mac)
 {
     int ret;
 
-    if (!eeprom)
-    {
+    if (!eeprom) {
         dev_warn(mqnic->dev, "Failed to read MAC from EEPROM; no EEPROM I2C client registered");
         return -1;
     }
 
     ret = i2c_smbus_read_i2c_block_data(eeprom, offset, ETH_ALEN, mac);
-    if (ret < 0)
-    {
+    if (ret < 0) {
         dev_warn(mqnic->dev, "Failed to read MAC from EEPROM");
         return -1;
     }
@@ -146,19 +142,17 @@ static int read_mac_from_eeprom(struct mqnic_dev *mqnic, struct i2c_client *eepr
     return 0;
 }
 
-static int init_mac_list_from_eeprom_base(struct mqnic_dev *mqnic, struct i2c_client *eeprom, int offset, int count)
+static int init_mac_list_from_eeprom_base(struct mqnic_dev *mqnic,
+        struct i2c_client *eeprom, int offset, int count)
 {
     int ret;
     char mac[ETH_ALEN];
 
     ret = read_mac_from_eeprom(mqnic, eeprom, offset, mac);
     if (ret < 0)
-    {
         return ret;
-    }
 
-    if (!is_valid_ether_addr(mac))
-    {
+    if (!is_valid_ether_addr(mac)) {
         dev_warn(mqnic->dev, "EEPROM does not contain a valid base MAC");
         return -1;
     }
@@ -174,8 +168,7 @@ static int mqnic_generic_board_init(struct mqnic_dev *mqnic)
 
     mqnic->mod_i2c_client_count = 0;
 
-    if (mqnic_i2c_init(mqnic))
-    {
+    if (mqnic_i2c_init(mqnic)) {
         dev_err(mqnic->dev, "Failed to initialize I2C subsystem");
         return -1;
     }
@@ -423,17 +416,14 @@ static void mqnic_generic_board_deinit(struct mqnic_dev *mqnic)
     int k;
 
     // unregister I2C clients
-    for (k = 0; k < ARRAY_SIZE(mqnic->mod_i2c_client); k++)
-    {
-        if (mqnic->mod_i2c_client[k])
-        {
+    for (k = 0; k < ARRAY_SIZE(mqnic->mod_i2c_client); k++) {
+        if (mqnic->mod_i2c_client[k]) {
             i2c_unregister_device(mqnic->mod_i2c_client[k]);
             mqnic->mod_i2c_client[k] = NULL;
         }
     }
 
-    if (mqnic->eeprom_i2c_client)
-    {
+    if (mqnic->eeprom_i2c_client) {
         i2c_unregister_device(mqnic->eeprom_i2c_client);
         mqnic->eeprom_i2c_client = NULL;
     }
@@ -483,17 +473,14 @@ static int mqnic_alveo_bmc_read_mac_list(struct mqnic_dev *mqnic, int count)
     count = min(count, MQNIC_MAX_IF);
 
     mqnic->mac_count = 0;
-    for (k = 0; k < count; k++)
-    {
+    for (k = 0; k < count; k++) {
         ret = mqnic_alveo_bmc_read_mac(mqnic, k, mac);
-        if (ret)
-        {
+        if (ret) {
             dev_warn(mqnic->dev, "Failed to read MAC from Alveo BMC");
             return -1;
         }
 
-        if (is_valid_ether_addr(mac))
-        {
+        if (is_valid_ether_addr(mac)) {
             memcpy(mqnic->mac_list[mqnic->mac_count], mac, ETH_ALEN);
             mqnic->mac_count++;
         }
@@ -502,9 +489,7 @@ static int mqnic_alveo_bmc_read_mac_list(struct mqnic_dev *mqnic, int count)
     dev_info(mqnic->dev, "Read %d MACs from Alveo BMC", mqnic->mac_count);
 
     if (mqnic->mac_count == 0)
-    {
         dev_warn(mqnic->dev, "Failed to read any valid MACs from Alveo BMC");
-    }
 
     return mqnic->mac_count;
 }
@@ -517,8 +502,7 @@ static int mqnic_alveo_board_init(struct mqnic_dev *mqnic)
 
     mqnic->mod_i2c_client_count = 0;
 
-    if (mqnic_i2c_init(mqnic))
-    {
+    if (mqnic_i2c_init(mqnic)) {
         dev_err(mqnic->dev, "Failed to initialize I2C subsystem");
         return -1;
     }
@@ -560,8 +544,7 @@ static int mqnic_alveo_board_init(struct mqnic_dev *mqnic)
         // init BMC
 
         if (mqnic_alveo_bmc_reg_read(mqnic, 0x020000) == 0 ||
-            mqnic_alveo_bmc_reg_read(mqnic, 0x028000) != 0x74736574)
-        {
+                mqnic_alveo_bmc_reg_read(mqnic, 0x028000) != 0x74736574) {
             dev_info(mqnic->dev, "Resetting Alveo CMS");
 
             mqnic_alveo_bmc_reg_write(mqnic, 0x020000, 0);
@@ -570,13 +553,9 @@ static int mqnic_alveo_board_init(struct mqnic_dev *mqnic)
         }
 
         if (mqnic_alveo_bmc_reg_read(mqnic, 0x028000) != 0x74736574)
-        {
             dev_warn(mqnic->dev, "Alveo CMS not responding");
-        }
         else
-        {
             mqnic_alveo_bmc_read_mac_list(mqnic, 8);
-        }
 
         break;
     default:
@@ -596,25 +575,19 @@ static int mqnic_gecko_bmc_read(struct mqnic_dev *mqnic)
     u32 val;
     int timeout = 200;
 
-    while (1)
-    {
+    while (1) {
         val = ioread32(mqnic->hw_addr + 0x188);
-        if (val & BIT(19))
-        {
-            if (val & BIT(18))
-            {
+        if (val & BIT(19)) {
+            if (val & BIT(18)) {
                 // timed out
                 dev_warn(mqnic->dev, "Timed out waiting for Gecko BMC response");
                 msleep(10);
                 return -2;
             }
             return val & 0xffff;
-        }
-        else
-        {
+        } else {
             timeout--;
-            if (timeout == 0)
-            {
+            if (timeout == 0) {
                 dev_warn(mqnic->dev, "Timed out waiting for Gecko BMC interface");
                 return -1;
             }
@@ -655,8 +628,7 @@ static int mqnic_gecko_bmc_read_mac(struct mqnic_dev *mqnic, int index, char *ma
 {
     int i;
 
-    for (i = 0; i < ETH_ALEN; i += 2)
-    {
+    for (i = 0; i < ETH_ALEN; i += 2) {
         u16 val = mqnic_gecko_bmc_query(mqnic, 0x2003, 0 + index * ETH_ALEN + i);
         if (val < 0)
             return val;
@@ -675,17 +647,14 @@ static int mqnic_gecko_bmc_read_mac_list(struct mqnic_dev *mqnic, int count)
     count = min(count, MQNIC_MAX_IF);
 
     mqnic->mac_count = 0;
-    for (k = 0; k < count; k++)
-    {
+    for (k = 0; k < count; k++) {
         ret = mqnic_gecko_bmc_read_mac(mqnic, k, mac);
-        if (ret)
-        {
+        if (ret) {
             dev_warn(mqnic->dev, "Failed to read MAC from Gecko BMC");
             return -1;
         }
 
-        if (is_valid_ether_addr(mac))
-        {
+        if (is_valid_ether_addr(mac)) {
             memcpy(mqnic->mac_list[mqnic->mac_count], mac, ETH_ALEN);
             mqnic->mac_count++;
         }
@@ -694,9 +663,7 @@ static int mqnic_gecko_bmc_read_mac_list(struct mqnic_dev *mqnic, int count)
     dev_info(mqnic->dev, "Read %d MACs from Gecko BMC", mqnic->mac_count);
 
     if (mqnic->mac_count == 0)
-    {
         dev_warn(mqnic->dev, "Failed to read any valid MACs from Gecko BMC");
-    }
 
     return mqnic->mac_count;
 }
@@ -708,8 +675,7 @@ static int mqnic_gecko_board_init(struct mqnic_dev *mqnic)
 
     mqnic->mod_i2c_client_count = 0;
 
-    if (mqnic_i2c_init(mqnic))
-    {
+    if (mqnic_i2c_init(mqnic)) {
         dev_err(mqnic->dev, "Failed to initialize I2C subsystem");
         return -1;
     }
@@ -738,12 +704,9 @@ static int mqnic_gecko_board_init(struct mqnic_dev *mqnic)
         mqnic->mod_i2c_client_count = 2;
 
         // init BMC
-        if (mqnic_gecko_bmc_query(mqnic, 0x7006, 0) <= 0)
-        {
+        if (mqnic_gecko_bmc_query(mqnic, 0x7006, 0) <= 0) {
             dev_warn(mqnic->dev, "Gecko BMC not responding");
-        }
-        else
-        {
+        } else {
             uint16_t v_l = mqnic_gecko_bmc_query(mqnic, 0x7005, 0);
             uint16_t v_h = mqnic_gecko_bmc_query(mqnic, 0x7006, 0);
 

@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -33,15 +33,15 @@ either expressed or implied, of The Regents of the University of California.
 
 #include "mqnic.h"
 
-int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
+int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_ptr,
+        int size, int stride, int index, u8 __iomem *hw_addr)
 {
     struct device *dev = priv->dev;
     struct mqnic_cq_ring *ring;
     int ret;
 
     ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-    if (!ring)
-    {
+    if (!ring) {
         dev_err(dev, "Failed to allocate CQ ring");
         return -ENOMEM;
     }
@@ -53,9 +53,9 @@ int mqnic_create_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring **ring_pt
     ring->stride = roundup_pow_of_two(stride);
 
     ring->buf_size = ring->size * ring->stride;
-    ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
-    if (!ring->buf)
-    {
+    ring->buf = dma_alloc_coherent(dev, ring->buf_size,
+            &ring->buf_dma_addr, GFP_KERNEL);
+    if (!ring->buf) {
         dev_err(dev, "Failed to allocate CQ ring DMA buffer");
         ret = -ENOMEM;
         goto fail_ring;
@@ -118,7 +118,8 @@ int mqnic_activate_cq_ring(struct mqnic_priv *priv, struct mqnic_cq_ring *ring,
     iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_HEAD_PTR_REG);
     iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_CPL_QUEUE_TAIL_PTR_REG);
     // set size and activate queue
-    iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
+    iowrite32(ilog2(ring->size) | MQNIC_CPL_QUEUE_ACTIVE_MASK,
+            ring->hw_addr + MQNIC_CPL_QUEUE_ACTIVE_LOG_SIZE_REG);
 
     return 0;
 }
@@ -153,6 +154,6 @@ void mqnic_cq_write_tail_ptr(struct mqnic_cq_ring *ring)
 
 void mqnic_arm_cq(struct mqnic_cq_ring *ring)
 {
-    iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
+    iowrite32(ring->eq_index | MQNIC_CPL_QUEUE_ARM_MASK,
+            ring->hw_addr + MQNIC_CPL_QUEUE_INTERRUPT_INDEX_REG);
 }
-

@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -57,22 +57,20 @@ static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *v
     size_t map_size = vma->vm_end - vma->vm_start;
     int ret;
 
-    if (map_size > mqnic->hw_regs_size)
-    {
-        dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)", vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
+    if (map_size > mqnic->hw_regs_size) {
+        dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)",
+                vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
         return -EINVAL;
     }
 
-    ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT, map_size, pgprot_noncached(vma->vm_page_prot));
+    ret = remap_pfn_range(vma, vma->vm_start, mqnic->hw_regs_phys >> PAGE_SHIFT,
+            map_size, pgprot_noncached(vma->vm_page_prot));
 
     if (ret)
-    {
         dev_err(mqnic->dev, "mqnic_map_registers: remap_pfn_range failed for registers region");
-    }
     else
-    {
-        dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p", &mqnic->hw_regs_phys, (void *)vma->vm_start);
-    }
+        dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p",
+                &mqnic->hw_regs_phys, (void *)vma->vm_start);
 
     return ret;
 }
@@ -81,21 +79,12 @@ static int mqnic_mmap(struct file *file, struct vm_area_struct *vma)
 {
     struct miscdevice *miscdev = file->private_data;
     struct mqnic_dev *mqnic = container_of(miscdev, struct mqnic_dev, misc_dev);
-    int ret;
 
     if (vma->vm_pgoff == 0)
-    {
-        ret = mqnic_map_registers(mqnic, vma);
-    }
-    else
-    {
-        goto fail_invalid_offset;
-    }
+        return mqnic_map_registers(mqnic, vma);
 
-    return ret;
-
-fail_invalid_offset:
-    dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu", vma->vm_pgoff);
+    dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu",
+            vma->vm_pgoff);
     return -EINVAL;
 }
 

@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -33,15 +33,15 @@ either expressed or implied, of The Regents of the University of California.
 
 #include "mqnic.h"
 
-int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
+int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_ptr,
+        int size, int stride, int index, u8 __iomem *hw_addr)
 {
     struct device *dev = priv->dev;
     struct mqnic_eq_ring *ring;
     int ret;
 
     ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-    if (!ring)
-    {
+    if (!ring) {
         dev_err(dev, "Failed to allocate EQ ring");
         return -ENOMEM;
     }
@@ -53,9 +53,9 @@ int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_pt
     ring->stride = roundup_pow_of_two(stride);
 
     ring->buf_size = ring->size * ring->stride;
-    ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
-    if (!ring->buf)
-    {
+    ring->buf = dma_alloc_coherent(dev, ring->buf_size,
+            &ring->buf_dma_addr, GFP_KERNEL);
+    if (!ring->buf) {
         dev_err(dev, "Failed to allocate EQ ring DMA buffer");
         ret = -ENOMEM;
         goto fail_ring;
@@ -77,8 +77,10 @@ int mqnic_create_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_pt
     // set interrupt index
     iowrite32(0, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
     // set pointers
-    iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
-    iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
+    iowrite32(ring->head_ptr & ring->hw_ptr_mask,
+            ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
+    iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
+            ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
     // set size
     iowrite32(ilog2(ring->size), ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 
@@ -103,7 +105,8 @@ void mqnic_destroy_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring **ring_
     kfree(ring);
 }
 
-int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring, int int_index)
+int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
+        int int_index)
 {
     ring->int_index = int_index;
 
@@ -115,10 +118,13 @@ int mqnic_activate_eq_ring(struct mqnic_priv *priv, struct mqnic_eq_ring *ring,
     // set interrupt index
     iowrite32(int_index, ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
     // set pointers
-    iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
-    iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr+MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
+    iowrite32(ring->head_ptr & ring->hw_ptr_mask,
+            ring->hw_addr + MQNIC_EVENT_QUEUE_HEAD_PTR_REG);
+    iowrite32(ring->tail_ptr & ring->hw_ptr_mask,
+            ring->hw_addr + MQNIC_EVENT_QUEUE_TAIL_PTR_REG);
     // set size and activate queue
-    iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
+    iowrite32(ilog2(ring->size) | MQNIC_EVENT_QUEUE_ACTIVE_MASK,
+            ring->hw_addr + MQNIC_EVENT_QUEUE_ACTIVE_LOG_SIZE_REG);
 
     return 0;
 }
@@ -153,7 +159,8 @@ void mqnic_eq_write_tail_ptr(struct mqnic_eq_ring *ring)
 
 void mqnic_arm_eq(struct mqnic_eq_ring *ring)
 {
-    iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK, ring->hw_addr+MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
+    iowrite32(ring->int_index | MQNIC_EVENT_QUEUE_ARM_MASK,
+            ring->hw_addr + MQNIC_EVENT_QUEUE_INTERRUPT_INDEX_REG);
 }
 
 void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
@@ -165,9 +172,7 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
     int done = 0;
 
     if (unlikely(!priv->port_up))
-    {
         return;
-    }
 
     // read head pointer from NIC
     mqnic_eq_read_head_ptr(eq_ring);
@@ -175,48 +180,43 @@
     eq_tail_ptr = eq_ring->tail_ptr;
     eq_index = eq_tail_ptr & eq_ring->size_mask;
 
-    while (eq_ring->head_ptr != eq_tail_ptr)
-    {
+    while (eq_ring->head_ptr != eq_tail_ptr) {
         event = (struct mqnic_event *)(eq_ring->buf + eq_index * eq_ring->stride);
 
-        if (event->type == MQNIC_EVENT_TYPE_TX_CPL)
-        {
+        if (event->type == MQNIC_EVENT_TYPE_TX_CPL) {
             // transmit completion event
-            if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count))
-            {
-                dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type));
-                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
-            }
-            else
-            {
-                struct mqnic_cq_ring *cq_ring = priv->tx_cpl_ring[le16_to_cpu(event->source)];
+            if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) {
+                dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
+                        priv->port, le16_to_cpu(event->source), eq_index,
+                        le16_to_cpu(event->type));
+                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
+                        event, MQNIC_EVENT_SIZE, true);
+            } else {
+                struct mqnic_cq_ring *cq_ring =
+                        priv->tx_cpl_ring[le16_to_cpu(event->source)];
                 if (likely(cq_ring && cq_ring->handler))
-                {
                     cq_ring->handler(cq_ring);
-                }
             }
-        }
-        else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL)
-        {
+        } else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) {
             // receive completion event
-            if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count))
-            {
-                dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", priv->port, le16_to_cpu(event->source), eq_index, le16_to_cpu(event->type));
-                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
-            }
-            else
-            {
-                struct mqnic_cq_ring *cq_ring = priv->rx_cpl_ring[le16_to_cpu(event->source)];
+            if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) {
+                dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)",
+                        priv->port, le16_to_cpu(event->source), eq_index,
+                        le16_to_cpu(event->type));
+                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
+                        event, MQNIC_EVENT_SIZE, true);
+            } else {
+                struct mqnic_cq_ring *cq_ring =
+                        priv->rx_cpl_ring[le16_to_cpu(event->source)];
                 if (likely(cq_ring && cq_ring->handler))
-                {
                     cq_ring->handler(cq_ring);
-                }
             }
-        }
-        else
-        {
-            dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)", priv->port, le16_to_cpu(event->type), eq_index, le16_to_cpu(event->source));
-            print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, event, MQNIC_EVENT_SIZE, true);
+        } else {
+            dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)",
+                    priv->port, le16_to_cpu(event->type), eq_index,
+                    le16_to_cpu(event->source));
+            print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
+                    event, MQNIC_EVENT_SIZE, true);
         }
 
         done++;
@@ -229,4 +229,3 @@
     eq_ring->tail_ptr = eq_tail_ptr;
     mqnic_eq_write_tail_ptr(eq_ring);
 }
-

@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -40,7 +40,8 @@ either expressed or implied, of The Regents of the University of California.
 #define SFF_MODULE_ID_QSFP_PLUS 0x0d
 #define SFF_MODULE_ID_QSFP28 0x11
 
-static void mqnic_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo)
+static void mqnic_get_drvinfo(struct net_device *ndev,
+        struct ethtool_drvinfo *drvinfo)
 {
     struct mqnic_priv *priv = netdev_priv(ndev);
     struct mqnic_dev *mdev = priv->mdev;
@@ -48,11 +49,13 @@ static void mqnic_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *d
     strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
     strlcpy(drvinfo->version, DRIVER_VERSION, sizeof(drvinfo->version));
 
-    snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d", mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
+    snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d",
+            mdev->fw_ver >> 16, mdev->fw_ver & 0xffff);
     strlcpy(drvinfo->bus_info, dev_name(mdev->dev), sizeof(drvinfo->bus_info));
 }
 
-static int mqnic_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+static int mqnic_get_ts_info(struct net_device *ndev,
+        struct ethtool_ts_info *info)
 {
     struct mqnic_priv *priv = netdev_priv(ndev);
     struct mqnic_dev *mdev = priv->mdev;
@@ -65,30 +68,23 @@ static int mqnic_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *in
     if (!(priv->if_features & MQNIC_IF_FEATURE_PTP_TS) || !mdev->ptp_clock)
         return 0;
 
-    info->so_timestamping =
-        SOF_TIMESTAMPING_TX_HARDWARE |
-        SOF_TIMESTAMPING_RX_HARDWARE |
-        SOF_TIMESTAMPING_RAW_HARDWARE;
+    info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+        SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
 
-    info->tx_types =
-        BIT(HWTSTAMP_TX_OFF) |
-        BIT(HWTSTAMP_TX_ON);
+    info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
 
-    info->rx_filters =
-        BIT(HWTSTAMP_FILTER_NONE) |
-        BIT(HWTSTAMP_FILTER_ALL);
+    info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
 
     return 0;
 }
 
-static int mqnic_read_module_eeprom(struct net_device *ndev, u16 offset, u16 len, u8 *data)
+static int mqnic_read_module_eeprom(struct net_device *ndev,
+        u16 offset, u16 len, u8 * data)
 {
     struct mqnic_priv *priv = netdev_priv(ndev);
 
     if (!priv->mod_i2c_client)
-    {
         return -1;
-    }
 
     if (len > I2C_SMBUS_BLOCK_MAX)
         len = I2C_SMBUS_BLOCK_MAX;
@@ -96,7 +92,8 @@ static int mqnic_read_module_eeprom(struct net_device *ndev, u16 offset, u16 len
     return i2c_smbus_read_i2c_block_data(priv->mod_i2c_client, offset, len, data);
 }
 
-static int mqnic_get_module_info(struct net_device *ndev, struct ethtool_modinfo *modinfo)
+static int mqnic_get_module_info(struct net_device *ndev,
+        struct ethtool_modinfo *modinfo)
 {
     struct mqnic_priv *priv = netdev_priv(ndev);
     int read_len = 0;
@@ -120,13 +117,10 @@ static int mqnic_get_module_info(struct net_device *ndev, struct ethtool_modinfo
         break;
     case SFF_MODULE_ID_QSFP_PLUS:
         // check revision at address 1
-        if (data[1] >= 0x03)
-        {
+        if (data[1] >= 0x03) {
             modinfo->type = ETH_MODULE_SFF_8636;
             modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
-        }
-        else
-        {
+        } else {
             modinfo->type = ETH_MODULE_SFF_8436;
             modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
         }
@@ -143,7 +137,8 @@ static int mqnic_get_module_info(struct net_device *ndev, struct ethtool_modinfo
     return 0;
 }
 
-static int mqnic_get_module_eeprom(struct net_device *ndev, struct ethtool_eeprom *eeprom, u8 *data)
+static int mqnic_get_module_eeprom(struct net_device *ndev,
+        struct ethtool_eeprom *eeprom, u8 * data)
 {
     struct mqnic_priv *priv = netdev_priv(ndev);
     int i = 0;
@@ -154,15 +149,14 @@ static int mqnic_get_module_eeprom(struct net_device *ndev, struct ethtool_eepro
 
     memset(data, 0, eeprom->len);
 
-    while (i < eeprom->len)
-    {
-        read_len = mqnic_read_module_eeprom(ndev, eeprom->offset+i, eeprom->len-i, data+i);
+    while (i < eeprom->len) {
+        read_len = mqnic_read_module_eeprom(ndev, eeprom->offset + i,
+                eeprom->len - i, data + i);
 
         if (read_len == 0)
             return -EIO;
 
-        if (read_len < 0)
-        {
+        if (read_len < 0) {
             dev_err(priv->dev, "Failed to read module EEPROM");
             return 0;
         }
@@ -179,4 +173,3 @@ const struct ethtool_ops mqnic_ethtool_ops = {
     .get_module_info = mqnic_get_module_info,
     .get_module_eeprom = mqnic_get_module_eeprom,
 };
-

@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without

@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -38,28 +38,20 @@ static void mqnic_i2c_set_scl(void *data, int state)
     struct mqnic_i2c_bus *bus = data;
 
     if (state)
-    {
         iowrite32(ioread32(bus->scl_out_reg) | bus->scl_out_mask, bus->scl_out_reg);
-    }
     else
-    {
         iowrite32(ioread32(bus->scl_out_reg) & ~bus->scl_out_mask, bus->scl_out_reg);
-    }
 }
 
 static void mqnic_i2c_set_sda(void *data, int state)
 {
     struct mqnic_i2c_bus *bus = data;
 
     if (state)
-    {
         iowrite32(ioread32(bus->sda_out_reg) | bus->sda_out_mask, bus->sda_out_reg);
-    }
     else
-    {
         iowrite32(ioread32(bus->sda_out_reg) & ~bus->sda_out_mask, bus->sda_out_reg);
-    }
 }
 
 static int mqnic_i2c_get_scl(void *data)
 {
@@ -115,10 +107,10 @@ struct mqnic_i2c_bus *mqnic_i2c_bus_create(struct mqnic_dev *mqnic, u8 __iomem *
     adapter->owner = THIS_MODULE;
     adapter->algo_data = algo;
     adapter->dev.parent = mqnic->dev;
-    snprintf(adapter->name, sizeof(adapter->name), "%s I2C%d", mqnic->name, mqnic->i2c_adapter_count);
+    snprintf(adapter->name, sizeof(adapter->name), "%s I2C%d", mqnic->name,
+            mqnic->i2c_adapter_count);
 
-    if (i2c_bit_add_bus(adapter))
-    {
+    if (i2c_bit_add_bus(adapter)) {
         dev_err(mqnic->dev, "Failed to register I2C adapter");
         goto err_free_bus;
     }
@@ -180,9 +172,10 @@ int mqnic_i2c_init(struct mqnic_dev *mqnic)
 
 void mqnic_i2c_deinit(struct mqnic_dev *mqnic)
 {
-    while (!list_empty(&mqnic->i2c_bus))
-    {
-        struct mqnic_i2c_bus *bus = list_first_entry(&mqnic->i2c_bus, typeof(*bus), head);
+    struct mqnic_i2c_bus *bus;
+
+    while (!list_empty(&mqnic->i2c_bus)) {
+        bus = list_first_entry(&mqnic->i2c_bus, typeof(*bus), head);
         mqnic_i2c_bus_release(bus);
     }
 }

@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without

@ -1,6 +1,6 @@
|
|||||||
/*
|
/*
|
||||||
|
|
||||||
Copyright 2019, The Regents of the University of California.
|
Copyright 2019-2021, The Regents of the University of California.
|
||||||
All rights reserved.
|
All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
@ -62,13 +62,10 @@ static unsigned int mqnic_get_free_id(void)
|
|||||||
unsigned int id = 0;
|
unsigned int id = 0;
|
||||||
bool available = false;
|
bool available = false;
|
||||||
|
|
||||||
while (!available)
|
while (!available) {
|
||||||
{
|
|
||||||
available = true;
|
available = true;
|
||||||
list_for_each_entry(mqnic, &mqnic_devices, dev_list_node)
|
list_for_each_entry(mqnic, &mqnic_devices, dev_list_node) {
|
||||||
{
|
if (mqnic->id == id) {
|
||||||
if (mqnic->id == id)
|
|
||||||
{
|
|
||||||
available = false;
|
available = false;
|
||||||
id++;
|
id++;
|
||||||
break;
|
break;
|
||||||
@ -86,8 +83,7 @@ static irqreturn_t mqnic_interrupt(int irq, void *data)
|
|||||||
|
|
||||||
int k, l;
|
int k, l;
|
||||||
|
|
||||||
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
|
for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++) {
|
||||||
{
|
|
||||||
if (unlikely(!mqnic->ndev[k]))
|
if (unlikely(!mqnic->ndev[k]))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
@ -96,13 +92,11 @@ static irqreturn_t mqnic_interrupt(int irq, void *data)
|
|||||||
if (unlikely(!priv->port_up))
|
if (unlikely(!priv->port_up))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
for (l = 0; l < priv->event_queue_count; l++)
|
for (l = 0; l < priv->event_queue_count; l++) {
|
||||||
{
|
|
||||||
if (unlikely(!priv->event_ring[l]))
|
if (unlikely(!priv->event_ring[l]))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (priv->event_ring[l]->irq == irq)
|
if (priv->event_ring[l]->irq == irq) {
|
||||||
{
|
|
||||||
mqnic_process_eq(priv->ndev, priv->event_ring[l]);
|
mqnic_process_eq(priv->ndev, priv->event_ring[l]);
|
||||||
mqnic_arm_eq(priv->event_ring[l]);
|
mqnic_arm_eq(priv->event_ring[l]);
|
||||||
}
|
}
|
||||||
@ -133,14 +127,22 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
|
|||||||
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
|
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
|
||||||
pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &lnkcap);
|
pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &lnkcap);
|
||||||
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);
|
pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);
|
||||||
dev_info(dev, " Max payload size: %d bytes", 128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5));
|
dev_info(dev, " Max payload size: %d bytes",
|
||||||
dev_info(dev, " Max read request size: %d bytes", 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
|
128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5));
|
||||||
dev_info(dev, " Link capability: gen %d x%d", lnkcap & PCI_EXP_LNKCAP_SLS, (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
|
dev_info(dev, " Max read request size: %d bytes",
|
||||||
dev_info(dev, " Link status: gen %d x%d", lnksta & PCI_EXP_LNKSTA_CLS, (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
|
128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
|
||||||
dev_info(dev, " Relaxed ordering: %s", devctl & PCI_EXP_DEVCTL_RELAX_EN ? "enabled" : "disabled");
|
dev_info(dev, " Link capability: gen %d x%d",
|
||||||
dev_info(dev, " Phantom functions: %s", devctl & PCI_EXP_DEVCTL_PHANTOM ? "enabled" : "disabled");
|
lnkcap & PCI_EXP_LNKCAP_SLS, (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
|
||||||
dev_info(dev, " Extended tags: %s", devctl & PCI_EXP_DEVCTL_EXT_TAG ? "enabled" : "disabled");
|
dev_info(dev, " Link status: gen %d x%d",
|
||||||
dev_info(dev, " No snoop: %s", devctl & PCI_EXP_DEVCTL_NOSNOOP_EN ? "enabled" : "disabled");
|
lnksta & PCI_EXP_LNKSTA_CLS, (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
|
||||||
|
dev_info(dev, " Relaxed ordering: %s",
|
||||||
|
devctl & PCI_EXP_DEVCTL_RELAX_EN ? "enabled" : "disabled");
|
||||||
|
dev_info(dev, " Phantom functions: %s",
|
||||||
|
devctl & PCI_EXP_DEVCTL_PHANTOM ? "enabled" : "disabled");
|
||||||
|
dev_info(dev, " Extended tags: %s",
|
||||||
|
devctl & PCI_EXP_DEVCTL_EXT_TAG ? "enabled" : "disabled");
|
||||||
|
dev_info(dev, " No snoop: %s",
|
||||||
|
devctl & PCI_EXP_DEVCTL_NOSNOOP_EN ? "enabled" : "disabled");
|
||||||
}
|
}
|
||||||
#ifdef CONFIG_NUMA
|
#ifdef CONFIG_NUMA
|
||||||
dev_info(dev, " NUMA node: %d", pdev->dev.numa_node);
|
dev_info(dev, " NUMA node: %d", pdev->dev.numa_node);
|
||||||
@@ -149,8 +151,9 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	pcie_print_link_status(pdev);
 #endif
 
-	if (!(mqnic = devm_kzalloc(dev, sizeof(*mqnic), GFP_KERNEL)))
-	{
+	mqnic = devm_kzalloc(dev, sizeof(*mqnic), GFP_KERNEL);
+	if (!mqnic) {
+		dev_err(dev, "Failed to allocate memory");
 		return -ENOMEM;
 	}
 
@@ -167,24 +170,22 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	snprintf(mqnic->name, sizeof(mqnic->name), DRIVER_NAME "%d", mqnic->id);
 
 	// Disable ASPM
-	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
 
 	// Enable device
 	ret = pci_enable_device_mem(pdev);
-	if (ret)
-	{
+	if (ret) {
 		dev_err(dev, "Failed to enable PCI device");
 		goto fail_enable_device;
 	}
 
 	// Set mask
 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
-	if (ret)
-	{
+	if (ret) {
 		dev_warn(dev, "Warning: failed to set 64 bit PCI DMA mask");
 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-		if (ret)
-		{
+		if (ret) {
 			dev_err(dev, "Failed to set PCI DMA mask");
 			goto fail_regions;
 		}
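The 64-then-32-bit fallback reformatted above is the usual probe-time DMA setup. A minimal sketch of the pattern (illustrative, not the commit's code):

	/* prefer 64-bit DMA addressing; fall back to 32-bit if the
	 * platform or IOMMU cannot satisfy it */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;	/* no usable DMA addressing at all */
	}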
@@ -195,8 +196,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 
 	// Reserve regions
 	ret = pci_request_regions(pdev, DRIVER_NAME);
-	if (ret)
-	{
+	if (ret) {
 		dev_err(dev, "Failed to reserve regions");
 		goto fail_regions;
 	}
@@ -211,31 +211,26 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	// Map BARs
 	dev_info(dev, "Control BAR size: %llu", mqnic->hw_regs_size);
 	mqnic->hw_addr = pci_ioremap_bar(pdev, 0);
-	if (!mqnic->hw_addr)
-	{
+	if (!mqnic->hw_addr) {
 		ret = -ENOMEM;
 		dev_err(dev, "Failed to map control BAR");
 		goto fail_map_bars;
 	}
 
-	if (mqnic->app_hw_regs_size)
-	{
+	if (mqnic->app_hw_regs_size) {
 		dev_info(dev, "Application BAR size: %llu", mqnic->app_hw_regs_size);
 		mqnic->app_hw_addr = pci_ioremap_bar(pdev, 2);
-		if (!mqnic->app_hw_addr)
-		{
+		if (!mqnic->app_hw_addr) {
 			ret = -ENOMEM;
 			dev_err(dev, "Failed to map application BAR");
 			goto fail_map_bars;
 		}
 	}
 
-	if (mqnic->ram_hw_regs_size)
-	{
+	if (mqnic->ram_hw_regs_size) {
 		dev_info(dev, "RAM BAR size: %llu", mqnic->ram_hw_regs_size);
 		mqnic->ram_hw_addr = pci_ioremap_bar(pdev, 4);
-		if (!mqnic->ram_hw_addr)
-		{
+		if (!mqnic->ram_hw_addr) {
 			ret = -ENOMEM;
 			dev_err(dev, "Failed to map RAM BAR");
 			goto fail_map_bars;
@@ -243,8 +238,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	}
 
 	// Check if device needs to be reset
-	if (ioread32(mqnic->hw_addr) == 0xffffffff)
-	{
+	if (ioread32(mqnic->hw_addr) == 0xffffffff) {
 		ret = -EIO;
 		dev_err(dev, "Device needs to be reset");
 		goto fail_map_bars;
@@ -276,28 +270,26 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	dev_info(dev, "IF CSR offset: 0x%08x", mqnic->if_csr_offset);
 
 	// check BAR size
-	if (mqnic->if_count*mqnic->if_stride > mqnic->hw_regs_size)
-	{
+	if (mqnic->if_count * mqnic->if_stride > mqnic->hw_regs_size) {
 		ret = -EIO;
-		dev_err(dev, "Invalid BAR configuration (%d IF * 0x%x > 0x%llx)", mqnic->if_count, mqnic->if_stride, mqnic->hw_regs_size);
+		dev_err(dev, "Invalid BAR configuration (%d IF * 0x%x > 0x%llx)",
+				mqnic->if_count, mqnic->if_stride, mqnic->hw_regs_size);
 		goto fail_map_bars;
 	}
 
 	// Allocate MSI IRQs
 	mqnic->irq_count = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
-	if (mqnic->irq_count < 0)
-	{
+	if (mqnic->irq_count < 0) {
 		ret = -ENOMEM;
 		dev_err(dev, "Failed to allocate IRQs");
 		goto fail_map_bars;
 	}
 
 	// Set up interrupts
-	for (k = 0; k < mqnic->irq_count; k++)
-	{
-		ret = pci_request_irq(pdev, k, mqnic_interrupt, NULL, mqnic, "%s-%d", mqnic->name, k);
-		if (ret < 0)
-		{
+	for (k = 0; k < mqnic->irq_count; k++) {
+		ret = pci_request_irq(pdev, k, mqnic_interrupt, NULL,
+				mqnic, "%s-%d", mqnic->name, k);
+		if (ret < 0) {
 			dev_err(dev, "Failed to request IRQ");
 			goto fail_irq;
 		}
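pci_alloc_irq_vectors() and pci_request_irq() form the modern MSI setup pair used above: one call sizes the vector pool, then each vector gets a named handler. A minimal sketch (illustrative; my_handler and ctx are hypothetical names):

	int nvec, k, ret;

	/* request between 1 and 32 MSI vectors; returns the count granted */
	nvec = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	/* attach the same handler to every granted vector */
	for (k = 0; k < nvec; k++) {
		ret = pci_request_irq(pdev, k, my_handler, NULL, ctx, "mydev-%d", k);
		if (ret)
			goto unwind;	/* free the IRQs already requested */
	}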
@@ -307,8 +299,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 
 	// Board-specific init
 	ret = mqnic_board_init(mqnic);
-	if (ret)
-	{
+	if (ret) {
 		dev_err(dev, "Failed to initialize board");
 		goto fail_board;
 	}
@@ -318,28 +309,23 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 
 	// register PHC
 	if (mqnic->phc_count)
-	{
 		mqnic_register_phc(mqnic);
-	}
 
 	// Set up interfaces
 	if (mqnic->if_count > MQNIC_MAX_IF)
 		mqnic->if_count = MQNIC_MAX_IF;
 
-	for (k = 0; k < mqnic->if_count; k++)
-	{
+	for (k = 0; k < mqnic->if_count; k++) {
 		dev_info(dev, "Creating interface %d", k);
 		ret = mqnic_init_netdev(mqnic, k, mqnic->hw_addr + k * mqnic->if_stride);
-		if (ret)
-		{
+		if (ret) {
 			dev_err(dev, "Failed to create net_device");
 			goto fail_init_netdev;
 		}
 	}
 
 	// pass module I2C clients to net_device instances
-	for (k = 0; k < mqnic->if_count; k++)
-	{
+	for (k = 0; k < mqnic->if_count; k++) {
 		struct mqnic_priv *priv = netdev_priv(mqnic->ndev[k]);
 		priv->mod_i2c_client = mqnic->mod_i2c_client[k];
 	}
@@ -350,8 +336,7 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 	mqnic->misc_dev.parent = dev;
 
 	ret = misc_register(&mqnic->misc_dev);
-	if (ret)
-	{
+	if (ret) {
 		dev_err(dev, "misc_register failed: %d\n", ret);
 		goto fail_miscdev;
 	}
@@ -369,20 +354,14 @@ static int mqnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 fail_miscdev:
 fail_init_netdev:
 	for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
-	{
 		if (mqnic->ndev[k])
-		{
 			mqnic_destroy_netdev(mqnic->ndev[k]);
-		}
-	}
 	mqnic_unregister_phc(mqnic);
 	pci_clear_master(pdev);
 fail_board:
 	mqnic_board_deinit(mqnic);
 	for (k = 0; k < mqnic->irq_count; k++)
-	{
 		pci_free_irq(pdev, k, mqnic);
-	}
 fail_irq:
 	pci_free_irq_vectors(pdev);
 fail_map_bars:
@@ -417,21 +396,15 @@ static void mqnic_pci_remove(struct pci_dev *pdev)
 	spin_unlock(&mqnic_devices_lock);
 
 	for (k = 0; k < ARRAY_SIZE(mqnic->ndev); k++)
-	{
 		if (mqnic->ndev[k])
-		{
 			mqnic_destroy_netdev(mqnic->ndev[k]);
-		}
-	}
 
 	mqnic_unregister_phc(mqnic);
 
 	pci_clear_master(pdev);
 	mqnic_board_deinit(mqnic);
 	for (k = 0; k < mqnic->irq_count; k++)
-	{
 		pci_free_irq(pdev, k, mqnic);
-	}
 	pci_free_irq_vectors(pdev);
 	if (mqnic->hw_addr)
 		pci_iounmap(pdev, mqnic->hw_addr);
@@ -470,4 +443,3 @@ static void __exit mqnic_exit(void)
 
 module_init(mqnic_init);
 module_exit(mqnic_exit);
-
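The fail_* labels in the probe path above follow the kernel's goto-based unwind idiom: each failure jumps to the label that releases everything acquired up to that point, in reverse order of acquisition. The shape, reduced to two resources (illustrative; the acquire/release names are placeholders):

	ret = acquire_a();
	if (ret)
		goto fail_a;

	ret = acquire_b();
	if (ret)
		goto fail_b;

	return 0;

fail_b:
	release_a();	/* undo in reverse order */
fail_a:
	return ret;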
@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -42,29 +42,27 @@ static int mqnic_start_port(struct net_device *ndev)
 	dev_info(mdev->dev, "mqnic_start_port on port %d", priv->port);
 
 	// set up event queues
-	for (k = 0; k < priv->event_queue_count; k++)
-	{
+	for (k = 0; k < priv->event_queue_count; k++) {
 		priv->event_ring[k]->irq = mdev->irq_map[k % mdev->irq_count];
 		mqnic_activate_eq_ring(priv, priv->event_ring[k], k % mdev->irq_count);
 		mqnic_arm_eq(priv->event_ring[k]);
 	}
 
 	// set up RX completion queues
-	for (k = 0; k < priv->rx_cpl_queue_count; k++)
-	{
+	for (k = 0; k < priv->rx_cpl_queue_count; k++) {
 		mqnic_activate_cq_ring(priv, priv->rx_cpl_ring[k], k % priv->event_queue_count);
 		priv->rx_cpl_ring[k]->ring_index = k;
 		priv->rx_cpl_ring[k]->handler = mqnic_rx_irq;
 
-		netif_napi_add(ndev, &priv->rx_cpl_ring[k]->napi, mqnic_poll_rx_cq, NAPI_POLL_WEIGHT);
+		netif_napi_add(ndev, &priv->rx_cpl_ring[k]->napi,
+				mqnic_poll_rx_cq, NAPI_POLL_WEIGHT);
 		napi_enable(&priv->rx_cpl_ring[k]->napi);
 
 		mqnic_arm_cq(priv->rx_cpl_ring[k]);
 	}
 
 	// set up RX queues
-	for (k = 0; k < priv->rx_queue_count; k++)
-	{
+	for (k = 0; k < priv->rx_queue_count; k++) {
 		priv->rx_ring[k]->mtu = ndev->mtu;
 		if (ndev->mtu + ETH_HLEN <= PAGE_SIZE)
 			priv->rx_ring[k]->page_order = 0;
@@ -74,28 +72,26 @@ static int mqnic_start_port(struct net_device *ndev)
 	}
 
 	// set up TX completion queues
-	for (k = 0; k < priv->tx_cpl_queue_count; k++)
-	{
+	for (k = 0; k < priv->tx_cpl_queue_count; k++) {
 		mqnic_activate_cq_ring(priv, priv->tx_cpl_ring[k], k % priv->event_queue_count);
 		priv->tx_cpl_ring[k]->ring_index = k;
 		priv->tx_cpl_ring[k]->handler = mqnic_tx_irq;
 
-		netif_tx_napi_add(ndev, &priv->tx_cpl_ring[k]->napi, mqnic_poll_tx_cq, NAPI_POLL_WEIGHT);
+		netif_tx_napi_add(ndev, &priv->tx_cpl_ring[k]->napi,
+				mqnic_poll_tx_cq, NAPI_POLL_WEIGHT);
 		napi_enable(&priv->tx_cpl_ring[k]->napi);
 
 		mqnic_arm_cq(priv->tx_cpl_ring[k]);
 	}
 
 	// set up TX queues
-	for (k = 0; k < priv->tx_queue_count; k++)
-	{
+	for (k = 0; k < priv->tx_queue_count; k++) {
 		mqnic_activate_tx_ring(priv, priv->tx_ring[k], k);
 		priv->tx_ring[k]->tx_queue = netdev_get_tx_queue(ndev, k);
 	}
 
 	// configure ports
-	for (k = 0; k < priv->port_count; k++)
-	{
+	for (k = 0; k < priv->port_count; k++) {
 		// set port MTU
 		mqnic_port_set_tx_mtu(priv->ports[k], ndev->mtu + ETH_HLEN);
 		mqnic_port_set_rx_mtu(priv->ports[k], ndev->mtu + ETH_HLEN);
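netif_napi_add()/netif_tx_napi_add() above bind a poll function to each completion ring before napi_enable() allows it to be scheduled; the four-argument form with an explicit weight matches the kernels this driver targets (newer kernels later dropped the weight parameter). A minimal RX-side sketch (illustrative; my_poll is a hypothetical callback):

	/* register the poll callback, then allow scheduling */
	netif_napi_add(ndev, &ring->napi, my_poll, NAPI_POLL_WEIGHT);
	napi_enable(&ring->napi);

	/* in the interrupt path, defer the work to NAPI */
	if (napi_schedule_prep(&ring->napi))
		__napi_schedule(&ring->napi);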
@@ -138,19 +134,14 @@ static int mqnic_stop_port(struct net_device *ndev)
 
 	// disable ports
 	for (k = 0; k < priv->port_count; k++)
-	{
 		mqnic_deactivate_port(priv->ports[k]);
-	}
 
 	// deactivate TX queues
 	for (k = 0; k < priv->tx_queue_count; k++)
-	{
 		mqnic_deactivate_tx_ring(priv, priv->tx_ring[k]);
-	}
 
 	// deactivate TX completion queues
-	for (k = 0; k < priv->tx_cpl_queue_count; k++)
-	{
+	for (k = 0; k < priv->tx_cpl_queue_count; k++) {
 		mqnic_deactivate_cq_ring(priv, priv->tx_cpl_ring[k]);
 
 		napi_disable(&priv->tx_cpl_ring[k]->napi);
@@ -159,13 +150,10 @@ static int mqnic_stop_port(struct net_device *ndev)
 
 	// deactivate RX queues
 	for (k = 0; k < priv->rx_queue_count; k++)
-	{
 		mqnic_deactivate_rx_ring(priv, priv->rx_ring[k]);
-	}
 
 	// deactivate RX completion queues
-	for (k = 0; k < priv->rx_cpl_queue_count; k++)
-	{
+	for (k = 0; k < priv->rx_cpl_queue_count; k++) {
 		mqnic_deactivate_cq_ring(priv, priv->rx_cpl_ring[k]);
 
 		napi_disable(&priv->rx_cpl_ring[k]->napi);
@@ -174,23 +162,17 @@ static int mqnic_stop_port(struct net_device *ndev)
 
 	// deactivate event queues
 	for (k = 0; k < priv->event_queue_count; k++)
-	{
 		mqnic_deactivate_eq_ring(priv, priv->event_ring[k]);
-	}
 
 	msleep(10);
 
 	// free descriptors in TX queues
 	for (k = 0; k < priv->tx_queue_count; k++)
-	{
 		mqnic_free_tx_buf(priv, priv->tx_ring[k]);
-	}
 
 	// free descriptors in RX queues
 	for (k = 0; k < priv->rx_queue_count; k++)
-	{
 		mqnic_free_rx_buf(priv, priv->rx_ring[k]);
-	}
 
 	netif_carrier_off(ndev);
 	return 0;
@@ -207,9 +189,7 @@ static int mqnic_open(struct net_device *ndev)
 	ret = mqnic_start_port(ndev);
 
 	if (ret)
-	{
 		dev_err(mdev->dev, "Failed to start port: %d", priv->port);
-	}
 
 	mutex_unlock(&mdev->state_lock);
 	return ret;
@@ -226,9 +206,7 @@ static int mqnic_close(struct net_device *ndev)
 	ret = mqnic_stop_port(ndev);
 
 	if (ret)
-	{
 		dev_err(mdev->dev, "Failed to stop port: %d", priv->port);
-	}
 
 	mutex_unlock(&mdev->state_lock);
 	return ret;
@@ -245,8 +223,7 @@ void mqnic_update_stats(struct net_device *ndev)
 
 	packets = 0;
 	bytes = 0;
-	for (k = 0; k < priv->rx_queue_count; k++)
-	{
+	for (k = 0; k < priv->rx_queue_count; k++) {
 		const struct mqnic_ring *ring = priv->rx_ring[k];
 
 		packets += READ_ONCE(ring->packets);
@@ -257,8 +234,7 @@ void mqnic_update_stats(struct net_device *ndev)
 
 	packets = 0;
 	bytes = 0;
-	for (k = 0; k < priv->tx_queue_count; k++)
-	{
+	for (k = 0; k < priv->tx_queue_count; k++) {
 		const struct mqnic_ring *ring = priv->tx_ring[k];
 
 		packets += READ_ONCE(ring->packets);
@@ -268,7 +244,8 @@ void mqnic_update_stats(struct net_device *ndev)
 	ndev->stats.tx_bytes = bytes;
 }
 
-static void mqnic_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats)
+static void mqnic_get_stats64(struct net_device *ndev,
+		struct rtnl_link_stats64 *stats)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 
@@ -284,14 +261,10 @@ static int mqnic_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
 	struct hwtstamp_config hwts_config;
 
 	if (copy_from_user(&hwts_config, ifr->ifr_data, sizeof(hwts_config)))
-	{
 		return -EFAULT;
-	}
 
 	if (hwts_config.flags)
-	{
 		return -EINVAL;
-	}
 
 	switch (hwts_config.tx_type) {
 	case HWTSTAMP_TX_OFF:
@@ -328,36 +301,27 @@ static int mqnic_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
 	memcpy(&priv->hwts_config, &hwts_config, sizeof(hwts_config));
 
 	if (copy_to_user(ifr->ifr_data, &hwts_config, sizeof(hwts_config)))
-	{
 		return -EFAULT;
-	}
 	else
-	{
 		return 0;
-	}
 }
 
 static int mqnic_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 
 	if (copy_to_user(ifr->ifr_data, &priv->hwts_config, sizeof(priv->hwts_config)))
-	{
 		return -EFAULT;
-	}
 	else
-	{
 		return 0;
-	}
 }
 
 static int mqnic_change_mtu(struct net_device *ndev, int new_mtu)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_dev *mdev = priv->mdev;
 
-	if (new_mtu < ndev->min_mtu || new_mtu > ndev->max_mtu)
-	{
+	if (new_mtu < ndev->min_mtu || new_mtu > ndev->max_mtu) {
 		dev_err(mdev->dev, "Bad MTU: %d", new_mtu);
 		return -EPERM;
 	}
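The SIOCSHWTSTAMP handler reformatted above has the standard shape: copy struct hwtstamp_config in from user space, validate the requested modes, apply them, and echo the accepted config back. A minimal sketch (illustrative, not the driver's exact validation):

	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;	/* unsupported TX timestamping mode */
	}

	/* echo the accepted configuration back to user space */
	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;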
@@ -366,8 +330,7 @@ static int mqnic_change_mtu(struct net_device *ndev, int new_mtu)
 
 	ndev->mtu = new_mtu;
 
-	if (netif_running(ndev))
-	{
+	if (netif_running(ndev)) {
 		mutex_lock(&mdev->state_lock);
 
 		mqnic_stop_port(ndev);
@@ -411,8 +374,8 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	u32 desc_block_size;
 
 	ndev = alloc_etherdev_mqs(sizeof(*priv), MQNIC_MAX_TX_RINGS, MQNIC_MAX_RX_RINGS);
-	if (!ndev)
-	{
+	if (!ndev) {
+		dev_err(dev, "Failed to allocate memory");
 		return -ENOMEM;
 	}
 
@@ -487,17 +450,13 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	// set MAC
 	ndev->addr_len = ETH_ALEN;
 
-	if (port >= mdev->mac_count)
-	{
+	if (port >= mdev->mac_count) {
 		dev_warn(dev, "Exhausted permanent MAC addresses; using random MAC");
 		eth_hw_addr_random(ndev);
-	}
-	else
-	{
+	} else {
 		memcpy(ndev->dev_addr, mdev->mac_list[port], ETH_ALEN);
 
-		if (!is_valid_ether_addr(ndev->dev_addr))
-		{
+		if (!is_valid_ether_addr(ndev->dev_addr)) {
 			dev_warn(dev, "Invalid MAC address in list; using random MAC");
 			eth_hw_addr_random(ndev);
 		}
@@ -519,58 +478,46 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	desc_block_size = priv->max_desc_block_size < 4 ? priv->max_desc_block_size : 4;
 
 	// allocate rings
-	for (k = 0; k < priv->event_queue_count; k++)
-	{
-		ret = mqnic_create_eq_ring(priv, &priv->event_ring[k], 1024, MQNIC_EVENT_SIZE, k, hw_addr+priv->event_queue_offset+k*MQNIC_EVENT_QUEUE_STRIDE); // TODO configure/constant
+	for (k = 0; k < priv->event_queue_count; k++) {
+		ret = mqnic_create_eq_ring(priv, &priv->event_ring[k], 1024, MQNIC_EVENT_SIZE, k,
+				hw_addr + priv->event_queue_offset + k * MQNIC_EVENT_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
-		{
 			goto fail;
-		}
 	}
 
-	for (k = 0; k < priv->tx_queue_count; k++)
-	{
-		ret = mqnic_create_tx_ring(priv, &priv->tx_ring[k], 1024, MQNIC_DESC_SIZE*desc_block_size, k, hw_addr+priv->tx_queue_offset+k*MQNIC_QUEUE_STRIDE); // TODO configure/constant
+	for (k = 0; k < priv->tx_queue_count; k++) {
+		ret = mqnic_create_tx_ring(priv, &priv->tx_ring[k], 1024, MQNIC_DESC_SIZE * desc_block_size, k,
+				hw_addr + priv->tx_queue_offset + k * MQNIC_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
-		{
 			goto fail;
-		}
 	}
 
-	for (k = 0; k < priv->tx_cpl_queue_count; k++)
-	{
-		ret = mqnic_create_cq_ring(priv, &priv->tx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k, hw_addr+priv->tx_cpl_queue_offset+k*MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
+	for (k = 0; k < priv->tx_cpl_queue_count; k++) {
+		ret = mqnic_create_cq_ring(priv, &priv->tx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k,
+				hw_addr + priv->tx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
-		{
 			goto fail;
-		}
 	}
 
-	for (k = 0; k < priv->rx_queue_count; k++)
-	{
-		ret = mqnic_create_rx_ring(priv, &priv->rx_ring[k], 1024, MQNIC_DESC_SIZE, k, hw_addr+priv->rx_queue_offset+k*MQNIC_QUEUE_STRIDE); // TODO configure/constant
+	for (k = 0; k < priv->rx_queue_count; k++) {
+		ret = mqnic_create_rx_ring(priv, &priv->rx_ring[k], 1024, MQNIC_DESC_SIZE, k,
+				hw_addr + priv->rx_queue_offset + k * MQNIC_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
-		{
 			goto fail;
-		}
 	}
 
-	for (k = 0; k < priv->rx_cpl_queue_count; k++)
-	{
-		ret = mqnic_create_cq_ring(priv, &priv->rx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k, hw_addr+priv->rx_cpl_queue_offset+k*MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
+	for (k = 0; k < priv->rx_cpl_queue_count; k++) {
+		ret = mqnic_create_cq_ring(priv, &priv->rx_cpl_ring[k], 1024, MQNIC_CPL_SIZE, k,
+				hw_addr + priv->rx_cpl_queue_offset + k * MQNIC_CPL_QUEUE_STRIDE); // TODO configure/constant
 		if (ret)
-		{
 			goto fail;
-		}
 	}
 
-	for (k = 0; k < priv->port_count; k++)
-	{
-		ret = mqnic_create_port(priv, &priv->ports[k], k, hw_addr+priv->port_offset+k*priv->port_stride);
+	for (k = 0; k < priv->port_count; k++) {
+		ret = mqnic_create_port(priv, &priv->ports[k], k,
+				hw_addr + priv->port_offset + k * priv->port_stride);
 		if (ret)
-		{
 			goto fail;
-		}
 
 		mqnic_port_set_rss_mask(priv->ports[k], 0xffffffff);
 	}
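Each constructor call above receives its slice of the control BAR computed as base + offset + index * stride: the per-queue CSR windows are packed back-to-back at a constant stride, so the addressing reduces to this sketch (illustrative, reusing identifiers that appear in the hunk):

	/* per-queue CSR block k inside the interface's register window */
	u8 __iomem *q_csr = hw_addr + priv->tx_queue_offset + k * MQNIC_QUEUE_STRIDE;

	iowrite32(0, q_csr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);	/* e.g. deactivate queue k */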
@@ -583,14 +530,10 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	ndev->hw_features = NETIF_F_SG;
 
 	if (priv->if_features & MQNIC_IF_FEATURE_RX_CSUM)
-	{
 		ndev->hw_features |= NETIF_F_RXCSUM;
-	}
 
 	if (priv->if_features & MQNIC_IF_FEATURE_TX_CSUM)
-	{
 		ndev->hw_features |= NETIF_F_HW_CSUM;
-	}
 
 	ndev->features = ndev->hw_features | NETIF_F_HIGHDMA;
 	ndev->hw_features |= 0;
@@ -599,15 +542,12 @@ int mqnic_init_netdev(struct mqnic_dev *mdev, int port, u8 __iomem *hw_addr)
 	ndev->max_mtu = 1500;
 
 	if (priv->ports[0] && priv->ports[0]->port_mtu)
-	{
 		ndev->max_mtu = priv->ports[0]->port_mtu - ETH_HLEN;
-	}
 
 	netif_carrier_off(ndev);
 
 	ret = register_netdev(ndev);
-	if (ret)
-	{
+	if (ret) {
 		dev_err(dev, "netdev registration failed on port %d", port);
 		goto fail;
 	}
@@ -630,61 +570,34 @@ void mqnic_destroy_netdev(struct net_device *ndev)
 	int k;
 
 	if (priv->registered)
-	{
 		unregister_netdev(ndev);
-	}
 
 	mdev->ndev[priv->port] = NULL;
 
 	// free rings
 	for (k = 0; k < ARRAY_SIZE(priv->event_ring); k++)
-	{
 		if (priv->event_ring[k])
-		{
 			mqnic_destroy_eq_ring(priv, &priv->event_ring[k]);
-		}
-	}
 
 	for (k = 0; k < ARRAY_SIZE(priv->tx_ring); k++)
-	{
 		if (priv->tx_ring[k])
-		{
 			mqnic_destroy_tx_ring(priv, &priv->tx_ring[k]);
-		}
-	}
 
 	for (k = 0; k < ARRAY_SIZE(priv->tx_cpl_ring); k++)
-	{
 		if (priv->tx_cpl_ring[k])
-		{
 			mqnic_destroy_cq_ring(priv, &priv->tx_cpl_ring[k]);
-		}
-	}
 
 	for (k = 0; k < ARRAY_SIZE(priv->rx_ring); k++)
-	{
 		if (priv->rx_ring[k])
-		{
 			mqnic_destroy_rx_ring(priv, &priv->rx_ring[k]);
-		}
-	}
 
 	for (k = 0; k < ARRAY_SIZE(priv->rx_cpl_ring); k++)
-	{
 		if (priv->rx_cpl_ring[k])
-		{
 			mqnic_destroy_cq_ring(priv, &priv->rx_cpl_ring[k]);
-		}
-	}
 
 	for (k = 0; k < ARRAY_SIZE(priv->ports); k++)
-	{
 		if (priv->ports[k])
-		{
 			mqnic_destroy_port(priv, &priv->ports[k]);
-		}
-	}
 
 	free_netdev(ndev);
 }
@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -33,14 +33,14 @@ either expressed or implied, of The Regents of the University of California.
 
 #include "mqnic.h"
 
-int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr, int index, u8 __iomem *hw_addr)
+int mqnic_create_port(struct mqnic_priv *priv, struct mqnic_port **port_ptr,
+		int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_port *port;
 
 	port = kzalloc(sizeof(*port), GFP_KERNEL);
-	if (!port)
-	{
+	if (!port) {
 		dev_err(dev, "Failed to allocate port");
 		return -ENOMEM;
 	}
@@ -97,9 +97,7 @@ int mqnic_activate_port(struct mqnic_port *port)
 
 	// enable queues
 	for (k = 0; k < port->tx_queue_count; k++)
-	{
 		iowrite32(3, port->hw_addr + port->sched_offset + k * 4);
-	}
 
 	return 0;
 }
@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -34,13 +34,13 @@ either expressed or implied, of The Regents of the University of California.
 #include "mqnic.h"
 #include <linux/version.h>
 
-ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring, const struct mqnic_cpl *cpl)
+ktime_t mqnic_read_cpl_ts(struct mqnic_dev *mdev, struct mqnic_ring *ring,
+		const struct mqnic_cpl *cpl)
 {
 	u64 ts_s = le16_to_cpu(cpl->ts_s);
 	u32 ts_ns = le32_to_cpu(cpl->ts_ns);
 
-	if (unlikely(!ring->ts_valid || (ring->ts_s ^ ts_s) & 0xff00))
-	{
+	if (unlikely(!ring->ts_valid || (ring->ts_s ^ ts_s) & 0xff00)) {
 		// seconds MSBs do not match, update cached timestamp
 		ring->ts_s = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_CUR_SEC_L);
 		ring->ts_s |= (u64) ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_CUR_SEC_H) << 32;
@@ -61,8 +61,7 @@ static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 
 	dev_info(mdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm);
 
-	if (scaled_ppm < 0)
-	{
+	if (scaled_ppm < 0) {
 		neg = true;
 		scaled_ppm = -scaled_ppm;
 	}
@@ -76,13 +75,9 @@ static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 	adj = div_u64(((nom_per_fns >> 16) * scaled_ppm) + 500000, 1000000);
 
 	if (neg)
-	{
 		adj = nom_per_fns - adj;
-	}
 	else
-	{
 		adj = nom_per_fns + adj;
-	}
 
 	iowrite32(adj & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_FNS);
 	iowrite32(adj >> 32, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_NS);
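adjfine receives the requested frequency offset as parts-per-million scaled by 2^16, and the code above folds it into the device's nominal fractional-nanosecond period. The arithmetic, isolated into a helper (a sketch; the function name is hypothetical):

	static u64 phc_scale_period(u64 nom_per_fns, long scaled_ppm)
	{
		bool neg = scaled_ppm < 0;
		u64 adj;

		if (neg)
			scaled_ppm = -scaled_ppm;

		/* scaled_ppm is ppm * 2^16: shift out the fixed-point fraction,
		 * then apply the ppm scaling with round-to-nearest (+500000) */
		adj = div_u64(((nom_per_fns >> 16) * scaled_ppm) + 500000, 1000000);

		return neg ? nom_per_fns - adj : nom_per_fns + adj;
	}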
@@ -105,7 +100,8 @@ static int mqnic_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 }
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
-static int mqnic_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts)
+static int mqnic_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+		struct ptp_system_timestamp *sts)
 {
 	struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
 
@@ -139,14 +135,11 @@ static int mqnic_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
 	dev_info(mdev->dev, "mqnic_phc_adjtime delta: %lld", delta);
 
-	if (delta > 1000000000 || delta < -1000000000)
-	{
+	if (delta > 1000000000 || delta < -1000000000) {
 		mqnic_phc_gettime(ptp, &ts);
 		ts = timespec64_add(ts, ns_to_timespec64(delta));
 		mqnic_phc_settime(ptp, &ts);
-	}
-	else
-	{
+	} else {
 		iowrite32(0, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_FNS);
 		iowrite32(delta & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_NS);
 		iowrite32(1, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_ADJ_COUNT);
@@ -164,14 +157,11 @@ static int mqnic_phc_perout(struct ptp_clock_info *ptp, int on, struct ptp_perou
 	u32 start_nsec, period_nsec, width_nsec;
 
 	if (perout->index >= mdev->ptp_clock_info.n_per_out)
-	{
 		return -EINVAL;
-	}
 
 	hw_addr = mdev->phc_hw_addr + MQNIC_PHC_PEROUT_OFFSET;
 
-	if (!on)
-	{
+	if (!on) {
 		iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_CTRL);
 
 		return 0;
@@ -217,10 +207,10 @@ static int mqnic_phc_perout(struct ptp_clock_info *ptp, int on, struct ptp_perou
 
 static int mqnic_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
 {
-	if (request)
-	{
-		switch (request->type)
-		{
+	if (!request)
+		return -EINVAL;
+
+	switch (request->type) {
 	case PTP_CLK_REQ_EXTTS:
 		return -EINVAL;
 	case PTP_CLK_REQ_PEROUT:
@@ -231,11 +221,6 @@ static int mqnic_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request
 		return -EINVAL;
 	}
 }
-	else
-	{
-		return -EINVAL;
-	}
-}
 
 static void mqnic_phc_set_from_system_clock(struct ptp_clock_info *ptp)
 {
@@ -254,38 +239,34 @@ void mqnic_register_phc(struct mqnic_dev *mdev)
 {
 	u32 phc_features;
 
-	if (mdev->ptp_clock)
-	{
+	if (mdev->ptp_clock) {
+		dev_warn(mdev->dev, "PTP clock already registered");
 		return;
 	}
 
 	phc_features = ioread32(mdev->phc_hw_addr + MQNIC_PHC_REG_FEATURES);
 
 	mdev->ptp_clock_info.owner = THIS_MODULE;
-	mdev->ptp_clock_info.max_adj = 100000000,
-	mdev->ptp_clock_info.n_alarm = 0,
-	mdev->ptp_clock_info.n_ext_ts = 0,
-	mdev->ptp_clock_info.n_per_out = phc_features & 0xff,
-	mdev->ptp_clock_info.n_pins = 0,
-	mdev->ptp_clock_info.pps = 0,
-	mdev->ptp_clock_info.adjfine = mqnic_phc_adjfine,
-	mdev->ptp_clock_info.adjtime = mqnic_phc_adjtime,
-	mdev->ptp_clock_info.gettime64 = mqnic_phc_gettime,
+	mdev->ptp_clock_info.max_adj = 100000000;
+	mdev->ptp_clock_info.n_alarm = 0;
+	mdev->ptp_clock_info.n_ext_ts = 0;
+	mdev->ptp_clock_info.n_per_out = phc_features & 0xff;
+	mdev->ptp_clock_info.n_pins = 0;
+	mdev->ptp_clock_info.pps = 0;
+	mdev->ptp_clock_info.adjfine = mqnic_phc_adjfine;
+	mdev->ptp_clock_info.adjtime = mqnic_phc_adjtime;
+	mdev->ptp_clock_info.gettime64 = mqnic_phc_gettime;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
-	mdev->ptp_clock_info.gettimex64 = mqnic_phc_gettimex,
+	mdev->ptp_clock_info.gettimex64 = mqnic_phc_gettimex;
 #endif
-	mdev->ptp_clock_info.settime64 = mqnic_phc_settime,
-	mdev->ptp_clock_info.enable = mqnic_phc_enable,
+	mdev->ptp_clock_info.settime64 = mqnic_phc_settime;
+	mdev->ptp_clock_info.enable = mqnic_phc_enable;
 
 	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, mdev->dev);
 
-	if (IS_ERR(mdev->ptp_clock))
-	{
+	if (IS_ERR(mdev->ptp_clock)) {
 		mdev->ptp_clock = NULL;
 		dev_err(mdev->dev, "ptp_clock_register failed");
-	}
-	else
-	{
+	} else {
 		dev_info(mdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));
 
 		mqnic_phc_set_from_system_clock(&mdev->ptp_clock_info);
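Replacing the trailing commas with semicolons above is more than cosmetic: the comma operator chained every field assignment into a single statement, which happened to work but obscured the intent before ptp_clock_info is handed to ptp_clock_register(). A minimal registration sketch (illustrative; the my_* names are hypothetical):

	static struct ptp_clock_info my_ptp_info = {
		.owner     = THIS_MODULE,
		.name      = "my_phc",
		.max_adj   = 100000000,
		.adjfine   = my_adjfine,
		.adjtime   = my_adjtime,
		.gettime64 = my_gettime,
		.settime64 = my_settime,
		.enable    = my_enable,
	};

	ptp = ptp_clock_register(&my_ptp_info, dev);
	if (IS_ERR(ptp))
		ptp = NULL;	/* run without a PHC on failure */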
@@ -294,11 +275,9 @@ void mqnic_register_phc(struct mqnic_dev *mdev)
 
 void mqnic_unregister_phc(struct mqnic_dev *mdev)
 {
-	if (mdev->ptp_clock)
-	{
+	if (mdev->ptp_clock) {
 		ptp_clock_unregister(mdev->ptp_clock);
 		mdev->ptp_clock = NULL;
 		dev_info(mdev->dev, "unregistered PHC");
 	}
 }
@@ -1,6 +1,6 @@
 /*
 
-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -33,15 +33,15 @@ either expressed or implied, of The Regents of the University of California.
 
 #include "mqnic.h"
 
-int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
+int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_ring *ring;
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (!ring)
-	{
+	if (!ring) {
 		dev_err(dev, "Failed to allocate RX ring");
 		return -ENOMEM;
 	}
@@ -55,17 +55,16 @@ int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 	ring->desc_block_size = 1 << ring->log_desc_block_size;
 
 	ring->rx_info = kvzalloc(sizeof(*ring->rx_info) * ring->size, GFP_KERNEL);
-	if (!ring->rx_info)
-	{
+	if (!ring->rx_info) {
 		dev_err(dev, "Failed to allocate rx_info");
 		ret = -ENOMEM;
 		goto fail_ring;
 	}
 
 	ring->buf_size = ring->size * ring->stride;
-	ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
-	if (!ring->buf)
-	{
+	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
+			&ring->buf_dma_addr, GFP_KERNEL);
+	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate RX ring DMA buffer");
 		ret = -ENOMEM;
 		goto fail_info;
@@ -91,7 +90,8 @@ int mqnic_create_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size
-	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	*ring_ptr = ring;
 	return 0;
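dma_alloc_coherent() above carves one physically contiguous, cache-coherent block for the whole descriptor ring, and the device is then told the bus address and log2 size through CSR writes. A minimal sketch (illustrative; the register offsets ADDR_LO/ADDR_HI/LOG_SIZE are hypothetical stand-ins for the driver's MQNIC_QUEUE_* registers):

	/* one coherent block holds size * stride bytes of descriptors */
	buf = dma_alloc_coherent(dev, size * stride, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* publish the bus address and log2(size) to the device */
	iowrite32(lower_32_bits(buf_dma), csr + ADDR_LO);
	iowrite32(upper_32_bits(buf_dma), csr + ADDR_HI);
	iowrite32(ilog2(size), csr + LOG_SIZE);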
@@ -121,7 +121,8 @@ void mqnic_destroy_rx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr
 	kfree(ring);
 }
 
-int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index)
+int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int cpl_index)
 {
 	// deactivate queue
 	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -134,7 +135,8 @@ int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
-	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 
 	mqnic_refill_rx_buffers(priv, ring);
 
@@ -144,7 +146,8 @@ int mqnic_activate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int
 void mqnic_deactivate_rx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
 {
 	// deactivate queue
-	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 }
 
 bool mqnic_is_rx_ring_empty(const struct mqnic_ring *ring)
@@ -167,12 +170,14 @@ void mqnic_rx_write_head_ptr(struct mqnic_ring *ring)
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
 }
 
-void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index)
+void mqnic_free_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int index)
 {
 	struct mqnic_rx_info *rx_info = &ring->rx_info[index];
 	struct page *page = rx_info->page;
 
-	dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr), dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
+	dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
+			dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
 	rx_info->dma_addr = 0;
 	__free_pages(page, rx_info->page_order);
 	rx_info->page = NULL;
@@ -183,8 +188,7 @@ int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
 	u32 index;
 	int cnt = 0;
 
-	while (!mqnic_is_rx_ring_empty(ring))
-	{
+	while (!mqnic_is_rx_ring_empty(ring)) {
 		index = ring->clean_tail_ptr & ring->size_mask;
 		mqnic_free_rx_desc(priv, ring, index);
 		ring->clean_tail_ptr++;
@@ -198,7 +202,8 @@ int mqnic_free_rx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
 	return cnt;
 }
 
-int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index)
+int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int index)
 {
 	struct mqnic_rx_info *rx_info = &ring->rx_info[index];
 	struct mqnic_desc *rx_desc = (struct mqnic_desc *)(ring->buf + index * ring->stride);
@@ -207,25 +212,25 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int
 	u32 len = PAGE_SIZE << page_order;
 	dma_addr_t dma_addr;
 
-	if (unlikely(page))
-	{
-		dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d", priv->port);
+	if (unlikely(page)) {
+		dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d",
+				priv->port);
 		return -1;
 	}
 
 	page = dev_alloc_pages(page_order);
-	if (unlikely(!page))
-	{
-		dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d", priv->port);
+	if (unlikely(!page)) {
+		dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d",
+				priv->port);
 		return -1;
 	}
 
 	// map page
 	dma_addr = dma_map_page(priv->dev, page, 0, len, PCI_DMA_FROMDEVICE);
 
-	if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
-	{
-		dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d", priv->port);
+	if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
+		dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d",
+				priv->port);
 		__free_pages(page, page_order);
 		return -1;
 	}
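The allocate-map-check sequence above is the standard way to attach a fresh page to an RX descriptor: dma_mapping_error() must be consulted before the address is ever handed to hardware, and the page must be freed if the mapping fails. A minimal sketch (illustrative):

	struct page *page;
	dma_addr_t dma;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		return -ENOMEM;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		__free_pages(page, order);	/* never hand hardware a bad address */
		return -ENOMEM;
	}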
@ -251,8 +256,7 @@ void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
|
|||||||
if (missing < 8)
|
if (missing < 8)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
for ( ; missing-- > 0; )
|
for (; missing-- > 0;) {
|
||||||
{
|
|
||||||
if (mqnic_prepare_rx_desc(priv, ring, ring->head_ptr & ring->size_mask))
|
if (mqnic_prepare_rx_desc(priv, ring, ring->head_ptr & ring->size_mask))
|
||||||
break;
|
break;
|
||||||
ring->head_ptr++;
|
ring->head_ptr++;
|
||||||
@ -263,7 +267,8 @@ void mqnic_refill_rx_buffers(struct mqnic_priv *priv, struct mqnic_ring *ring)
|
|||||||
mqnic_rx_write_head_ptr(ring);
|
mqnic_rx_write_head_ptr(ring);
|
||||||
}
|
}
|
||||||
|
|
||||||
int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget)
|
int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
|
||||||
|
int napi_budget)
|
||||||
{
|
{
|
||||||
struct mqnic_priv *priv = netdev_priv(ndev);
|
struct mqnic_priv *priv = netdev_priv(ndev);
|
||||||
struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
|
struct mqnic_ring *ring = priv->rx_ring[cq_ring->ring_index];
|
@@ -280,9 +285,7 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 	u32 len;

 	if (unlikely(!priv->port_up))
-	{
 		return done;
-	}

 	// process completion queue
 	// read head pointer from NIC
@@ -293,49 +296,48 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,

 	mb(); // is a barrier here necessary? If so, what kind?

-	while (cq_ring->head_ptr != cq_tail_ptr && done < budget)
-	{
+	while (cq_ring->head_ptr != cq_tail_ptr && done < budget) {
 		cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
 		ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
 		rx_info = &ring->rx_info[ring_index];
 		page = rx_info->page;

-		if (unlikely(!page))
-		{
-			dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d", cq_ring->ring_index, ring_index);
-			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, cpl, MQNIC_CPL_SIZE, true);
+		if (unlikely(!page)) {
+			dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d",
+					cq_ring->ring_index, ring_index);
+			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
+					cpl, MQNIC_CPL_SIZE, true);
 			break;
 		}

 		skb = napi_get_frags(&cq_ring->napi);
-		if (unlikely(!skb))
-		{
-			dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb", cq_ring->ring_index);
+		if (unlikely(!skb)) {
+			dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb",
+					cq_ring->ring_index);
 			break;
 		}

 		// RX hardware timestamp
 		if (priv->if_features & MQNIC_IF_FEATURE_PTP_TS)
-		{
 			skb_hwtstamps(skb)->hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
-		}

 		skb_record_rx_queue(skb, cq_ring->ring_index);

 		// RX hardware checksum
-		if (ndev->features & NETIF_F_RXCSUM)
-		{
+		if (ndev->features & NETIF_F_RXCSUM) {
 			skb->csum = csum_unfold((__sum16) cpu_to_be16(le16_to_cpu(cpl->rx_csum)));
 			skb->ip_summed = CHECKSUM_COMPLETE;
 		}

 		// unmap
-		dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr), dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
+		dma_unmap_page(priv->dev, dma_unmap_addr(rx_info, dma_addr),
+				dma_unmap_len(rx_info, len), PCI_DMA_FROMDEVICE);
 		rx_info->dma_addr = 0;

 		len = min_t(u32, le16_to_cpu(cpl->len), rx_info->len);

-		dma_sync_single_range_for_cpu(priv->dev, rx_info->dma_addr, rx_info->page_offset, rx_info->len, PCI_DMA_FROMDEVICE);
+		dma_sync_single_range_for_cpu(priv->dev, rx_info->dma_addr, rx_info->page_offset,
+				rx_info->len, PCI_DMA_FROMDEVICE);

 		__skb_fill_page_desc(skb, 0, page, rx_info->page_offset, len);
 		rx_info->page = NULL;
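Aside: the NETIF_F_RXCSUM branch reports CHECKSUM_COMPLETE, meaning the device hands the stack a raw 16-bit ones'-complement sum over the packet rather than a verified-OK flag, and the stack folds and checks any L4 checksum itself. A sketch of just that conversion, assuming a little-endian sum field as in struct mqnic_cpl:

#include <linux/skbuff.h>
#include <net/checksum.h>

static void report_rx_csum(struct sk_buff *skb, __le16 hw_csum)
{
	/* device sum (LE) -> network byte order -> unfolded 32-bit csum */
	skb->csum = csum_unfold((__force __sum16)cpu_to_be16(le16_to_cpu(hw_csum)));
	skb->ip_summed = CHECKSUM_COMPLETE;
}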
@@ -368,8 +370,7 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 	ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
 	ring_index = ring_clean_tail_ptr & ring->size_mask;

-	while (ring_clean_tail_ptr != ring->tail_ptr)
-	{
+	while (ring_clean_tail_ptr != ring->tail_ptr) {
 		rx_info = &ring->rx_info[ring_index];

 		if (rx_info->page)
@@ -393,14 +394,10 @@ void mqnic_rx_irq(struct mqnic_cq_ring *cq)
 	struct mqnic_priv *priv = netdev_priv(cq->ndev);

 	if (likely(priv->port_up))
-	{
 		napi_schedule_irqoff(&cq->napi);
-	}
 	else
-	{
 		mqnic_arm_cq(cq);
-	}
 }

 int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
 {
@@ -411,9 +408,7 @@ int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)
 	done = mqnic_process_rx_cq(ndev, cq_ring, budget);

 	if (done == budget)
-	{
 		return done;
-	}

 	napi_complete(napi);

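Aside: both the RX and TX poll functions follow the standard NAPI contract that this hunk tidies: consume at most budget completions, return budget without completing while work may remain, and only re-arm the device interrupt once the queue is drained. A sketch of that contract (the my_ names are stand-ins; mqnic re-arms via mqnic_arm_cq()):

#include <linux/netdevice.h>

/* stubs standing in for the driver's process-CQ and arm-IRQ helpers */
static int my_process_cq(struct napi_struct *napi, int budget) { return 0; }
static void my_arm_irq(struct napi_struct *napi) { }

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = my_process_cq(napi, budget);

	if (done == budget)
		return done;	/* stay in polling mode; do not re-arm */

	napi_complete(napi);	/* leave polling mode ... */
	my_arm_irq(napi);	/* ... then re-enable the interrupt */
	return done;
}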
@@ -421,4 +416,3 @@ int mqnic_poll_rx_cq(struct napi_struct *napi, int budget)

 	return done;
 }
-

@@ -1,6 +1,6 @@
 /*

-Copyright 2019, The Regents of the University of California.
+Copyright 2019-2021, The Regents of the University of California.
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
@@ -34,15 +34,15 @@ either expressed or implied, of The Regents of the University of California.
 #include <linux/version.h>
 #include "mqnic.h"

-int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr, int size, int stride, int index, u8 __iomem *hw_addr)
+int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
+		int size, int stride, int index, u8 __iomem *hw_addr)
 {
 	struct device *dev = priv->dev;
 	struct mqnic_ring *ring;
 	int ret;

 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (!ring)
-	{
+	if (!ring) {
 		dev_err(dev, "Failed to allocate TX ring");
 		return -ENOMEM;
 	}
@@ -57,17 +57,16 @@ int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 	ring->desc_block_size = 1 << ring->log_desc_block_size;

 	ring->tx_info = kvzalloc(sizeof(*ring->tx_info) * ring->size, GFP_KERNEL);
-	if (!ring->tx_info)
-	{
+	if (!ring->tx_info) {
 		dev_err(dev, "Failed to allocate tx_info");
 		ret = -ENOMEM;
 		goto fail_ring;
 	}

 	ring->buf_size = ring->size * ring->stride;
-	ring->buf = dma_alloc_coherent(dev, ring->buf_size, &ring->buf_dma_addr, GFP_KERNEL);
-	if (!ring->buf)
-	{
+	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
+			&ring->buf_dma_addr, GFP_KERNEL);
+	if (!ring->buf) {
 		dev_err(dev, "Failed to allocate TX ring DMA buffer");
 		ret = -ENOMEM;
 		goto fail_info;
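Aside: the create path above uses the kernel's goto-unwind idiom, where each failure label releases exactly what was acquired before it, in reverse order. A hypothetical miniature of the same structure (buffer sizes and the demo_ names are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_ring {
	void *info;
	size_t buf_size;
	void *buf;
	dma_addr_t buf_dma_addr;
};

static int demo_create_ring(struct device *dev, struct demo_ring **out)
{
	struct demo_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	ring->info = kvzalloc(4096, GFP_KERNEL);
	if (!ring->info) {
		ret = -ENOMEM;
		goto fail_ring;
	}

	ring->buf_size = 4096;
	ring->buf = dma_alloc_coherent(dev, ring->buf_size,
			&ring->buf_dma_addr, GFP_KERNEL);
	if (!ring->buf) {
		ret = -ENOMEM;
		goto fail_info;
	}

	*out = ring;
	return 0;

fail_info:
	kvfree(ring->info);	/* release in reverse order of acquisition */
fail_ring:
	kfree(ring);
	return ret;
}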
@@ -93,7 +92,8 @@ int mqnic_create_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr,
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size
-	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

 	*ring_ptr = ring;
 	return 0;
@@ -123,7 +123,8 @@ void mqnic_destroy_tx_ring(struct mqnic_priv *priv, struct mqnic_ring **ring_ptr
 	kfree(ring);
 }

-int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int cpl_index)
+int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int cpl_index)
 {
 	// deactivate queue
 	iowrite32(0, ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
@@ -136,7 +137,8 @@ int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_HEAD_PTR_REG);
 	iowrite32(ring->tail_ptr & ring->hw_ptr_mask, ring->hw_addr + MQNIC_QUEUE_TAIL_PTR_REG);
 	// set size and activate queue
-	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK, ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8) | MQNIC_QUEUE_ACTIVE_MASK,
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);

 	return 0;
 }
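Aside: the writes this hunk re-wraps imply a packed layout for MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG: log2 of the ring size in the low bits, log2 of the descriptor block size starting at bit 8, and an active bit that is set only while the queue should run. A sketch of that encoding, with the field placement inferred from the code above and the MQNIC_QUEUE_* constants taken from the driver's mqnic.h:

#include <linux/io.h>
#include <linux/log2.h>
#include "mqnic.h"	/* MQNIC_QUEUE_* register definitions */

static void queue_write_size_reg(u8 __iomem *hw_addr, u32 size,
		u32 log_desc_block_size, bool active)
{
	/* low bits: log2(ring size); bit 8 up: log2(desc block size) */
	u32 val = ilog2(size) | (log_desc_block_size << 8);

	if (active)
		val |= MQNIC_QUEUE_ACTIVE_MASK;	/* run bit; cleared to quiesce */

	iowrite32(val, hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
}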
@@ -144,7 +146,8 @@ int mqnic_activate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring, int
 void mqnic_deactivate_tx_ring(struct mqnic_priv *priv, struct mqnic_ring *ring)
 {
 	// deactivate queue
-	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8), ring->hw_addr+MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
+	iowrite32(ilog2(ring->size) | (ring->log_desc_block_size << 8),
+			ring->hw_addr + MQNIC_QUEUE_ACTIVE_LOG_SIZE_REG);
 }

 bool mqnic_is_tx_ring_empty(const struct mqnic_ring *ring)
@@ -167,7 +170,8 @@ void mqnic_tx_write_head_ptr(struct mqnic_ring *ring)
 	iowrite32(ring->head_ptr & ring->hw_ptr_mask, ring->hw_head_ptr);
 }

-void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int index, int napi_budget)
+void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		int index, int napi_budget)
 {
 	struct mqnic_tx_info *tx_info = &ring->tx_info[index];
 	struct sk_buff *skb = tx_info->skb;
@@ -175,14 +179,14 @@ void mqnic_free_tx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring, int in

 	prefetchw(&skb->users);

-	dma_unmap_single(priv->dev, dma_unmap_addr(tx_info, dma_addr), dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
+	dma_unmap_single(priv->dev, dma_unmap_addr(tx_info, dma_addr),
+			dma_unmap_len(tx_info, len), PCI_DMA_TODEVICE);
 	dma_unmap_addr_set(tx_info, dma_addr, 0);

 	// unmap frags
 	for (i = 0; i < tx_info->frag_count; i++)
-	{
-		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr, tx_info->frags[i].len, PCI_DMA_TODEVICE);
-	}
+		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
+				tx_info->frags[i].len, PCI_DMA_TODEVICE);

 	napi_consume_skb(skb, napi_budget);
 	tx_info->skb = NULL;
@@ -193,8 +197,7 @@ int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
 	u32 index;
 	int cnt = 0;

-	while (!mqnic_is_tx_ring_empty(ring))
-	{
+	while (!mqnic_is_tx_ring_empty(ring)) {
 		index = ring->clean_tail_ptr & ring->size_mask;
 		mqnic_free_tx_desc(priv, ring, index, 0);
 		ring->clean_tail_ptr++;
@@ -208,7 +211,8 @@ int mqnic_free_tx_buf(struct mqnic_priv *priv, struct mqnic_ring *ring)
 	return cnt;
 }

-int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring, int napi_budget)
+int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
+		int napi_budget)
 {
 	struct mqnic_priv *priv = netdev_priv(ndev);
 	struct mqnic_ring *ring = priv->tx_ring[cq_ring->ring_index];
@@ -224,9 +228,7 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 	int budget = napi_budget;

 	if (unlikely(!priv->port_up))
-	{
 		return done;
-	}

 	// prefetch for BQL
 	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
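Aside: the BQL prefetch helper above warms the cacheline that Byte Queue Limits accounting will touch during completion. BQL itself is the usual sent/completed pairing in the core netdev API; a sketch of where the two calls sit (the demo_ wrappers and byte counts are illustrative):

#include <linux/netdevice.h>

/* in ndo_start_xmit, after the descriptor is queued */
static void demo_account_sent(struct netdev_queue *txq, struct sk_buff *skb)
{
	netdev_tx_sent_queue(txq, skb->len);
}

/* in TX completion processing, once per cleaned batch */
static void demo_account_completed(struct netdev_queue *txq,
		unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}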
@@ -238,21 +240,18 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 	cq_tail_ptr = cq_ring->tail_ptr;
 	cq_index = cq_tail_ptr & cq_ring->size_mask;

-	while (cq_ring->head_ptr != cq_tail_ptr && done < budget)
-	{
+	while (cq_ring->head_ptr != cq_tail_ptr && done < budget) {
 		cpl = (struct mqnic_cpl *)(cq_ring->buf + cq_index * cq_ring->stride);
 		ring_index = le16_to_cpu(cpl->index) & ring->size_mask;
 		tx_info = &ring->tx_info[ring_index];

 		// TX hardware timestamp
-		if (unlikely(tx_info->ts_requested))
-		{
+		if (unlikely(tx_info->ts_requested)) {
 			struct skb_shared_hwtstamps hwts;
 			dev_info(priv->dev, "mqnic_process_tx_cq TX TS requested");
 			hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
 			skb_tstamp_tx(tx_info->skb, &hwts);
 		}

 		// free TX descriptor
 		mqnic_free_tx_desc(priv, ring, ring_index, napi_budget);

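Aside: ts_requested is set on the transmit side when the stack asks for a hardware timestamp, and this hunk's completion path feeds the device time back through skb_tstamp_tx(). A sketch of the request side as it typically looks in a driver (the mqnic transmit-side code is outside this hunk, so the demo_ helper is hypothetical):

#include <linux/skbuff.h>

/* returns true if the completion path should read back a HW timestamp */
static bool demo_request_hw_tstamp(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (!(shinfo->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	shinfo->tx_flags |= SKBTX_IN_PROGRESS;	/* tell the stack a HW TS will follow */
	return true;	/* completion handler should call skb_tstamp_tx() */
}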
@@ -276,8 +275,7 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
 	ring_clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);
 	ring_index = ring_clean_tail_ptr & ring->size_mask;

-	while (ring_clean_tail_ptr != ring->tail_ptr)
-	{
+	while (ring_clean_tail_ptr != ring->tail_ptr) {
 		tx_info = &ring->tx_info[ring_index];

 		if (tx_info->skb)
@@ -295,9 +293,7 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,

 	// wake queue if it is stopped
 	if (netif_tx_queue_stopped(ring->tx_queue) && !mqnic_is_tx_ring_full(ring))
-	{
 		netif_tx_wake_queue(ring->tx_queue);
-	}

 	return done;
 }
@@ -307,14 +303,10 @@ void mqnic_tx_irq(struct mqnic_cq_ring *cq)
 	struct mqnic_priv *priv = netdev_priv(cq->ndev);

 	if (likely(priv->port_up))
-	{
 		napi_schedule_irqoff(&cq->napi);
-	}
 	else
-	{
 		mqnic_arm_cq(cq);
-	}
 }

 int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
 {
@@ -325,9 +317,7 @@ int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
 	done = mqnic_process_tx_cq(ndev, cq_ring, budget);

 	if (done == budget)
-	{
 		return done;
-	}

 	napi_complete(napi);

@@ -336,7 +326,9 @@ int mqnic_poll_tx_cq(struct napi_struct *napi, int budget)
 	return done;
 }

-static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring, struct mqnic_tx_info *tx_info, struct mqnic_desc *tx_desc, struct sk_buff *skb)
+static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
+		struct mqnic_tx_info *tx_info,
+		struct mqnic_desc *tx_desc, struct sk_buff *skb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	u32 i;
@@ -347,16 +339,13 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring, stru
 	tx_info->skb = skb;
 	tx_info->frag_count = 0;

-	for (i = 0; i < shinfo->nr_frags; i++)
-	{
+	for (i = 0; i < shinfo->nr_frags; i++) {
 		const skb_frag_t *frag = &shinfo->frags[i];
 		len = skb_frag_size(frag);
 		dma_addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
-		{
 			// mapping failed
 			goto map_error;
-		}

 		// write descriptor
 		tx_desc[i + 1].len = cpu_to_le32(len);
@@ -368,8 +357,7 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring, stru
 		tx_info->frags[i].dma_addr = dma_addr;
 	}

-	for (i = tx_info->frag_count; i < ring->desc_block_size-1; i++)
-	{
+	for (i = tx_info->frag_count; i < ring->desc_block_size - 1; i++) {
 		tx_desc[i + 1].len = 0;
 		tx_desc[i + 1].addr = 0;
 	}
@@ -379,10 +367,8 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring, stru
 	dma_addr = dma_map_single(priv->dev, skb->data, len, PCI_DMA_TODEVICE);

 	if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
-	{
 		// mapping failed
 		goto map_error;
-	}

 	// write descriptor
 	tx_desc[0].len = cpu_to_le32(len);
@@ -399,9 +385,8 @@ map_error:

 	// unmap frags
 	for (i = 0; i < tx_info->frag_count; i++)
-	{
-		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr, tx_info->frags[i].len, PCI_DMA_TODEVICE);
-	}
+		dma_unmap_page(priv->dev, tx_info->frags[i].dma_addr,
+				tx_info->frags[i].len, PCI_DMA_TODEVICE);

 	// update tx_info
 	tx_info->skb = NULL;
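Aside: map_error unwinds exactly the frags recorded in tx_info->frags[] so far, which is why frag_count is bumped only after a successful skb_frag_dma_map(). A compact sketch of that bookkeeping, with the types simplified and the demo_ names hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct demo_frag { dma_addr_t dma_addr; u32 len; };

static bool demo_map_frags(struct device *dev, struct sk_buff *skb,
		struct demo_frag *frags, u32 *frag_count)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 i;

	*frag_count = 0;
	for (i = 0; i < shinfo->nr_frags; i++) {
		u32 len = skb_frag_size(&shinfo->frags[i]);
		dma_addr_t dma = skb_frag_dma_map(dev, &shinfo->frags[i],
				0, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma))
			goto unwind;

		frags[i].dma_addr = dma;
		frags[i].len = len;
		(*frag_count)++;	/* counted only once the mapping is live */
	}
	return true;

unwind:
	while (*frag_count > 0) {	/* undo only what was actually mapped */
		--(*frag_count);
		dma_unmap_page(dev, frags[*frag_count].dma_addr,
				frags[*frag_count].len, DMA_TO_DEVICE);
	}
	return false;
}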
@@ -423,17 +408,13 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 clean_tail_ptr;

 	if (unlikely(!priv->port_up))
-	{
 		goto tx_drop;
-	}

 	ring_index = skb_get_queue_mapping(skb);

 	if (unlikely(ring_index >= priv->tx_queue_count))
-	{
 		// queue mapping out of range
 		goto tx_drop;
-	}

 	ring = priv->tx_ring[ring_index];

@@ -461,43 +442,33 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		unsigned int csum_start = skb_checksum_start_offset(skb);
 		unsigned int csum_offset = skb->csum_offset;

-		if (csum_start > 255 || csum_offset > 127)
-		{
-			dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d", csum_start, csum_offset);
+		if (csum_start > 255 || csum_offset > 127) {
+			dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d",
+					csum_start, csum_offset);

 			// offset out of range, fall back on software checksum
-			if (skb_checksum_help(skb))
-			{
+			if (skb_checksum_help(skb)) {
 				// software checksumming failed
 				goto tx_drop_count;
 			}
 			tx_desc->tx_csum_cmd = 0;
-		}
-		else
-		{
+		} else {
 			tx_desc->tx_csum_cmd = cpu_to_le16(0x8000 | (csum_offset << 8) | (csum_start));
 		}
-	}
-	else
-	{
+	} else {
 		tx_desc->tx_csum_cmd = 0;
 	}

-	if (shinfo->nr_frags > ring->desc_block_size-1 || (skb->data_len && skb->data_len < 32))
-	{
+	if (shinfo->nr_frags > ring->desc_block_size - 1 || (skb->data_len && skb->data_len < 32)) {
 		// too many frags or very short data portion; linearize
 		if (skb_linearize(skb))
-		{
 			goto tx_drop_count;
-		}
 	}

 	// map skb
 	if (!mqnic_map_skb(priv, ring, tx_info, tx_desc, skb))
-	{
 		// map failed
 		goto tx_drop_count;
-	}

 	// count packet
 	ring->packets++;
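Aside: with CHECKSUM_PARTIAL the stack supplies csum_start (where summing begins) and csum_offset (where the result is stored); the descriptor command above packs the start into 8 bits and the offset into 7, hence the range check and the skb_checksum_help() software fallback. The packed command spelled out, with the bit layout inferred from the constant above:

#include <linux/skbuff.h>

/* 0x8000 = enable bit; offset in bits 14:8; start in bits 7:0 */
static u16 demo_csum_cmd(const struct sk_buff *skb)
{
	unsigned int start = skb_checksum_start_offset(skb);
	unsigned int offset = skb->csum_offset;

	if (start > 255 || offset > 127)
		return 0;	/* out of range: caller falls back to skb_checksum_help() */

	return 0x8000 | (offset << 8) | start;
}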
@@ -509,9 +480,9 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	skb_tx_timestamp(skb);

 	stop_queue = mqnic_is_tx_ring_full(ring);
-	if (unlikely(stop_queue))
-	{
-		dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d", ring_index, priv->port);
+	if (unlikely(stop_queue)) {
+		dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d",
+				ring_index, priv->port);
 		netif_tx_stop_queue(ring->tx_queue);
 	}

@@ -521,27 +492,23 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)

 	// enqueue on NIC
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
-	if (unlikely(!netdev_xmit_more() || stop_queue))
+	if (unlikely(!netdev_xmit_more() || stop_queue)) {
 #else
-	if (unlikely(!skb->xmit_more || stop_queue))
+	if (unlikely(!skb->xmit_more || stop_queue)) {
 #endif
-	{
 		dma_wmb();
 		mqnic_tx_write_head_ptr(ring);
 	}

 	// check if queue restarted
-	if (unlikely(stop_queue))
-	{
+	if (unlikely(stop_queue)) {
 		smp_rmb();

 		clean_tail_ptr = READ_ONCE(ring->clean_tail_ptr);

 		if (unlikely(!mqnic_is_tx_ring_full(ring)))
-		{
 			netif_tx_wake_queue(ring->tx_queue);
-		}
 	}

 	return NETDEV_TX_OK;
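Aside: the doorbell block this hunk reflows is the standard xmit_more batching pattern: defer the MMIO head-pointer write while the stack promises more packets, and order the descriptor stores ahead of the doorbell with dma_wmb(). A sketch under those assumptions (ring fields as used above; netdev_xmit_more() exists from v5.2, hence the #if in the driver):

#include <linux/io.h>
#include <linux/netdevice.h>

static void demo_ring_doorbell(u8 __iomem *hw_head_ptr, u32 head_ptr,
		u32 hw_ptr_mask, bool stop_queue)
{
	if (!netdev_xmit_more() || stop_queue) {
		dma_wmb();	/* descriptors visible to the device before the doorbell */
		iowrite32(head_ptr & hw_ptr_mask, hw_head_ptr);
	}
}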