mirror of https://github.com/corundum/corundum.git (synced 2025-02-06 08:38:23 +08:00; 1 star, 0 forks)

Use __func__ for function name in debug messages

This commit is contained in:
Alex Forencich 2021-10-21 14:44:05 -07:00
parent 79f778d85a
commit 323791cff3
6 changed files with 39 additions and 39 deletions

View File

@ -58,8 +58,8 @@ static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *v
int ret; int ret;
if (map_size > mqnic->hw_regs_size) { if (map_size > mqnic->hw_regs_size) {
dev_err(mqnic->dev, "mqnic_map_registers: Tried to map registers region with wrong size %lu (expected <= %llu)", dev_err(mqnic->dev, "%s: Tried to map registers region with wrong size %lu (expected <= %llu)",
vma->vm_end - vma->vm_start, mqnic->hw_regs_size); __func__, vma->vm_end - vma->vm_start, mqnic->hw_regs_size);
return -EINVAL; return -EINVAL;
} }
@ -67,10 +67,10 @@ static int mqnic_map_registers(struct mqnic_dev *mqnic, struct vm_area_struct *v
map_size, pgprot_noncached(vma->vm_page_prot)); map_size, pgprot_noncached(vma->vm_page_prot));
if (ret) if (ret)
dev_err(mqnic->dev, "mqnic_map_registers: remap_pfn_range failed for registers region"); dev_err(mqnic->dev, "%s: remap_pfn_range failed for registers region", __func__);
else else
dev_dbg(mqnic->dev, "mqnic_map_registers: Mapped registers region at phys: 0x%pap, virt: 0x%p", dev_dbg(mqnic->dev, "%s: Mapped registers region at phys: 0x%pap, virt: 0x%p",
&mqnic->hw_regs_phys, (void *)vma->vm_start); __func__, &mqnic->hw_regs_phys, (void *)vma->vm_start);
return ret; return ret;
} }
@ -83,8 +83,8 @@ static int mqnic_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_pgoff == 0) if (vma->vm_pgoff == 0)
return mqnic_map_registers(mqnic, vma); return mqnic_map_registers(mqnic, vma);
dev_err(mqnic->dev, "mqnic_mmap: Tried to map an unknown region at page offset %lu", dev_err(mqnic->dev, "%s: Tried to map an unknown region at page offset %lu",
vma->vm_pgoff); __func__, vma->vm_pgoff);
return -EINVAL; return -EINVAL;
} }

View File

@ -184,8 +184,8 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
if (event->type == MQNIC_EVENT_TYPE_TX_CPL) { if (event->type == MQNIC_EVENT_TYPE_TX_CPL) {
// transmit completion event // transmit completion event
if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) { if (unlikely(le16_to_cpu(event->source) > priv->tx_cpl_queue_count)) {
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", dev_err(priv->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
priv->port, le16_to_cpu(event->source), eq_index, __func__, priv->port, le16_to_cpu(event->source), eq_index,
le16_to_cpu(event->type)); le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true); event, MQNIC_EVENT_SIZE, true);
@ -197,8 +197,8 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
} else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) { } else if (le16_to_cpu(event->type) == MQNIC_EVENT_TYPE_RX_CPL) {
// receive completion event // receive completion event
if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) { if (unlikely(le16_to_cpu(event->source) > priv->rx_cpl_queue_count)) {
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event source %d (index %d, type %d)", dev_err(priv->dev, "%s on port %d: unknown event source %d (index %d, type %d)",
priv->port, le16_to_cpu(event->source), eq_index, __func__, priv->port, le16_to_cpu(event->source), eq_index,
le16_to_cpu(event->type)); le16_to_cpu(event->type));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true); event, MQNIC_EVENT_SIZE, true);
@ -208,8 +208,8 @@ void mqnic_process_eq(struct net_device *ndev, struct mqnic_eq_ring *eq_ring)
cq_ring->handler(cq_ring); cq_ring->handler(cq_ring);
} }
} else { } else {
dev_err(priv->dev, "mqnic_process_eq on port %d: unknown event type %d (index %d, source %d)", dev_err(priv->dev, "%s on port %d: unknown event type %d (index %d, source %d)",
priv->port, le16_to_cpu(event->type), eq_index, __func__, priv->port, le16_to_cpu(event->type), eq_index,
le16_to_cpu(event->source)); le16_to_cpu(event->source));
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
event, MQNIC_EVENT_SIZE, true); event, MQNIC_EVENT_SIZE, true);

View File

@ -39,7 +39,7 @@ static int mqnic_start_port(struct net_device *ndev)
struct mqnic_dev *mdev = priv->mdev; struct mqnic_dev *mdev = priv->mdev;
int k; int k;
dev_info(mdev->dev, "mqnic_start_port on port %d", priv->port); dev_info(mdev->dev, "%s on port %d", __func__, priv->port);
// set up event queues // set up event queues
for (k = 0; k < priv->event_queue_count; k++) { for (k = 0; k < priv->event_queue_count; k++) {
@ -117,7 +117,7 @@ static int mqnic_stop_port(struct net_device *ndev)
struct mqnic_dev *mdev = priv->mdev; struct mqnic_dev *mdev = priv->mdev;
int k; int k;
dev_info(mdev->dev, "mqnic_stop_port on port %d", priv->port); dev_info(mdev->dev, "%s on port %d", __func__, priv->port);
netif_tx_lock_bh(ndev); netif_tx_lock_bh(ndev);
// if (detach) // if (detach)

View File

@ -59,7 +59,7 @@ static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
bool neg = false; bool neg = false;
u64 nom_per_fns, adj; u64 nom_per_fns, adj;
dev_info(mdev->dev, "mqnic_phc_adjfine scaled_ppm: %ld", scaled_ppm); dev_info(mdev->dev, "%s: scaled_ppm: %ld", __func__, scaled_ppm);
if (scaled_ppm < 0) { if (scaled_ppm < 0) {
neg = true; neg = true;
@ -82,7 +82,7 @@ static int mqnic_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
iowrite32(adj & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_FNS); iowrite32(adj & 0xffffffff, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_FNS);
iowrite32(adj >> 32, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_NS); iowrite32(adj >> 32, mdev->phc_hw_addr + MQNIC_PHC_REG_PTP_PERIOD_NS);
dev_info(mdev->dev, "mqnic_phc_adjfine adj: 0x%llx", adj); dev_info(mdev->dev, "%s adj: 0x%llx", __func__, adj);
return 0; return 0;
} }
@ -133,7 +133,7 @@ static int mqnic_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info); struct mqnic_dev *mdev = container_of(ptp, struct mqnic_dev, ptp_clock_info);
struct timespec64 ts; struct timespec64 ts;
dev_info(mdev->dev, "mqnic_phc_adjtime delta: %lld", delta); dev_info(mdev->dev, "%s: delta: %lld", __func__, delta);
if (delta > 1000000000 || delta < -1000000000) { if (delta > 1000000000 || delta < -1000000000) {
mqnic_phc_gettime(ptp, &ts); mqnic_phc_gettime(ptp, &ts);
@ -181,9 +181,9 @@ static int mqnic_phc_perout(struct ptp_clock_info *ptp, int on, struct ptp_perou
width_sec = period_sec >> 1; width_sec = period_sec >> 1;
width_nsec = (period_nsec + (period_sec & 1 ? NSEC_PER_SEC : 0)) >> 1; width_nsec = (period_nsec + (period_sec & 1 ? NSEC_PER_SEC : 0)) >> 1;
dev_info(mdev->dev, "mqnic_phc_perout start: %lld.%09d", start_sec, start_nsec); dev_info(mdev->dev, "%s: start: %lld.%09d", __func__, start_sec, start_nsec);
dev_info(mdev->dev, "mqnic_phc_perout period: %lld.%09d", period_sec, period_nsec); dev_info(mdev->dev, "%s: period: %lld.%09d", __func__, period_sec, period_nsec);
dev_info(mdev->dev, "mqnic_phc_perout width: %lld.%09d", width_sec, width_nsec); dev_info(mdev->dev, "%s: width: %lld.%09d", __func__, width_sec, width_nsec);
iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_START_FNS); iowrite32(0, hw_addr + MQNIC_PHC_REG_PEROUT_START_FNS);
iowrite32(start_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_START_NS); iowrite32(start_nsec, hw_addr + MQNIC_PHC_REG_PEROUT_START_NS);
@ -265,7 +265,7 @@ void mqnic_register_phc(struct mqnic_dev *mdev)
if (IS_ERR(mdev->ptp_clock)) { if (IS_ERR(mdev->ptp_clock)) {
mdev->ptp_clock = NULL; mdev->ptp_clock = NULL;
dev_err(mdev->dev, "ptp_clock_register failed"); dev_err(mdev->dev, "%s: failed", __func__);
} else { } else {
dev_info(mdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock)); dev_info(mdev->dev, "registered PHC (index %d)", ptp_clock_index(mdev->ptp_clock));

View File

@ -209,15 +209,15 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
dma_addr_t dma_addr; dma_addr_t dma_addr;
if (unlikely(page)) { if (unlikely(page)) {
dev_err(priv->dev, "mqnic_prepare_rx_desc skb not yet processed on port %d", dev_err(priv->dev, "%s: skb not yet processed on port %d",
priv->port); __func__, priv->port);
return -1; return -1;
} }
page = dev_alloc_pages(page_order); page = dev_alloc_pages(page_order);
if (unlikely(!page)) { if (unlikely(!page)) {
dev_err(priv->dev, "mqnic_prepare_rx_desc failed to allocate memory on port %d", dev_err(priv->dev, "%s: failed to allocate memory on port %d",
priv->port); __func__, priv->port);
return -1; return -1;
} }
@ -225,8 +225,8 @@ int mqnic_prepare_rx_desc(struct mqnic_priv *priv, struct mqnic_ring *ring,
dma_addr = dma_map_page(priv->dev, page, 0, len, PCI_DMA_FROMDEVICE); dma_addr = dma_map_page(priv->dev, page, 0, len, PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(priv->dev, dma_addr))) { if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
dev_err(priv->dev, "mqnic_prepare_rx_desc DMA mapping failed on port %d", dev_err(priv->dev, "%s: DMA mapping failed on port %d",
priv->port); __func__, priv->port);
__free_pages(page, page_order); __free_pages(page, page_order);
return -1; return -1;
} }
@ -299,8 +299,8 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
page = rx_info->page; page = rx_info->page;
if (unlikely(!page)) { if (unlikely(!page)) {
dev_err(priv->dev, "mqnic_process_rx_cq ring %d null page at index %d", dev_err(priv->dev, "%s: ring %d null page at index %d",
cq_ring->ring_index, ring_index); __func__, cq_ring->ring_index, ring_index);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1, print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 1,
cpl, MQNIC_CPL_SIZE, true); cpl, MQNIC_CPL_SIZE, true);
break; break;
@ -308,8 +308,8 @@ int mqnic_process_rx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
skb = napi_get_frags(&cq_ring->napi); skb = napi_get_frags(&cq_ring->napi);
if (unlikely(!skb)) { if (unlikely(!skb)) {
dev_err(priv->dev, "mqnic_process_rx_cq ring %d failed to allocate skb", dev_err(priv->dev, "%s: ring %d failed to allocate skb",
cq_ring->ring_index); __func__, cq_ring->ring_index);
break; break;
} }

View File

@ -244,7 +244,7 @@ int mqnic_process_tx_cq(struct net_device *ndev, struct mqnic_cq_ring *cq_ring,
// TX hardware timestamp // TX hardware timestamp
if (unlikely(tx_info->ts_requested)) { if (unlikely(tx_info->ts_requested)) {
dev_info(priv->dev, "mqnic_process_tx_cq TX TS requested"); dev_info(priv->dev, "%s: TX TS requested", __func__);
hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl); hwts.hwtstamp = mqnic_read_cpl_ts(priv->mdev, ring, cpl);
skb_tstamp_tx(tx_info->skb, &hwts); skb_tstamp_tx(tx_info->skb, &hwts);
} }
@ -378,7 +378,7 @@ static bool mqnic_map_skb(struct mqnic_priv *priv, struct mqnic_ring *ring,
return true; return true;
map_error: map_error:
dev_err(priv->dev, "mqnic_map_skb DMA mapping failed"); dev_err(priv->dev, "%s: DMA mapping failed", __func__);
// unmap frags // unmap frags
for (i = 0; i < tx_info->frag_count; i++) for (i = 0; i < tx_info->frag_count; i++)
@ -429,7 +429,7 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
// TX hardware timestamp // TX hardware timestamp
tx_info->ts_requested = 0; tx_info->ts_requested = 0;
if (unlikely(priv->if_features & MQNIC_IF_FEATURE_PTP_TS && shinfo->tx_flags & SKBTX_HW_TSTAMP)) { if (unlikely(priv->if_features & MQNIC_IF_FEATURE_PTP_TS && shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
dev_info(priv->dev, "mqnic_start_xmit TX TS requested"); dev_info(priv->dev, "%s: TX TS requested", __func__);
shinfo->tx_flags |= SKBTX_IN_PROGRESS; shinfo->tx_flags |= SKBTX_IN_PROGRESS;
tx_info->ts_requested = 1; tx_info->ts_requested = 1;
} }
@ -440,8 +440,8 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned int csum_offset = skb->csum_offset; unsigned int csum_offset = skb->csum_offset;
if (csum_start > 255 || csum_offset > 127) { if (csum_start > 255 || csum_offset > 127) {
dev_info(priv->dev, "mqnic_start_xmit Hardware checksum fallback start %d offset %d", dev_info(priv->dev, "%s: Hardware checksum fallback start %d offset %d",
csum_start, csum_offset); __func__, csum_start, csum_offset);
// offset out of range, fall back on software checksum // offset out of range, fall back on software checksum
if (skb_checksum_help(skb)) { if (skb_checksum_help(skb)) {
@ -478,8 +478,8 @@ netdev_tx_t mqnic_start_xmit(struct sk_buff *skb, struct net_device *ndev)
stop_queue = mqnic_is_tx_ring_full(ring); stop_queue = mqnic_is_tx_ring_full(ring);
if (unlikely(stop_queue)) { if (unlikely(stop_queue)) {
dev_info(priv->dev, "mqnic_start_xmit TX ring %d full on port %d", dev_info(priv->dev, "%s: TX ring %d full on port %d",
ring_index, priv->port); __func__, ring_index, priv->port);
netif_tx_stop_queue(ring->tx_queue); netif_tx_stop_queue(ring->tx_queue);
} }