Merge: BNX2X driver updates for 9.7

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6951

JIRA: https://issues.redhat.com/browse/RHEL-94578

JIRA: https://issues.redhat.com/browse/RHEL-93390

bnx2* driver updates for RHEL-9.7

Signed-off-by: John Meneghini <jmeneghi@redhat.com>

Approved-by: Ewan D. Milne <emilne@redhat.com>
Approved-by: Chris Leech <cleech@redhat.com>
Approved-by: José Ignacio Tornos Martínez <jtornosm@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>

Merged-by: Augusto Caringi <acaringi@redhat.com>
Augusto Caringi 2025-06-17 15:42:42 -03:00
commit 6ece7013ba
21 changed files with 300 additions and 356 deletions

View File

@ -175,12 +175,12 @@ static const struct flash_spec flash_table[] =
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
"Entry 0101: ST M45PE10 (128kB non-bufferred)"},
"Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
"Entry 0110: ST M45PE20 (256kB non-bufferred)"},
"Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
@ -3829,7 +3829,7 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
return 0;
}
static int
static void
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
const struct bnx2_mips_fw_file_entry *fw_entry)
{
@ -3897,48 +3897,34 @@ load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
val &= ~cpu_reg->mode_value_halt;
bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
return 0;
}
static int
static void
bnx2_init_cpus(struct bnx2 *bp)
{
const struct bnx2_mips_fw_file *mips_fw =
(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
const struct bnx2_rv2p_fw_file *rv2p_fw =
(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
int rc;
/* Initialize the RV2P processor. */
load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
/* Initialize the RX Processor. */
rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
if (rc)
goto init_cpu_err;
load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
/* Initialize the TX Processor. */
rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
if (rc)
goto init_cpu_err;
load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
/* Initialize the TX Patch-up Processor. */
rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
if (rc)
goto init_cpu_err;
load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
/* Initialize the Completion Processor. */
rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
if (rc)
goto init_cpu_err;
load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
/* Initialize the Command Processor. */
rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
init_cpu_err:
return rc;
load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
}
static void
@ -4951,8 +4937,7 @@ bnx2_init_chip(struct bnx2 *bp)
} else
bnx2_init_context(bp);
if ((rc = bnx2_init_cpus(bp)) != 0)
return rc;
bnx2_init_cpus(bp);
bnx2_init_nvram(bp);
@ -5415,8 +5400,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
bp->rx_buf_use_size = rx_size;
/* hw alignment + build_skb() overhead*/
bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_buf_size = kmalloc_size_roundup(
SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
@ -8208,7 +8194,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
"pci_set_consistent_dma_mask failed, aborting\n");
"dma_set_coherent_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
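
The rx_buf_size hunk above rounds the requested length up to the size kmalloc() will actually hand back, so build_skb() can use the whole slab allocation instead of leaving slack at the tail. A minimal sketch of that pattern, assuming a hypothetical helper name and payload parameter:

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Illustrative sketch: size a receive buffer the way the hunk above does,
 * rounding up to what kmalloc() will really allocate.
 */
static unsigned int example_rx_buf_size(unsigned int payload)
{
	return kmalloc_size_roundup(SKB_DATA_ALIGN(payload) +
				    NET_SKB_PAD +
				    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}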

View File

@ -39,34 +39,34 @@ static const struct {
int size;
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
8, "[%s]: rx_ucast_packets" },
8, "[%d]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
8, "[%s]: rx_mcast_packets" },
8, "[%d]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
8, "[%s]: rx_bcast_packets" },
{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
8, "[%d]: rx_bcast_packets" },
{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
4, "[%s]: rx_phy_ip_err_discards"},
4, "[%d]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
4, "[%s]: rx_skb_alloc_discard" },
{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
{ Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
4, "[%d]: rx_skb_alloc_discard" },
{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
{ Q_STATS_OFFSET32(driver_xoff), 4, "[%d]: tx_exhaustion_events" },
{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, "[%s]: tx_ucast_packets" },
8, "[%d]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8, "[%s]: tx_mcast_packets" },
8, "[%d]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8, "[%s]: tx_bcast_packets" },
8, "[%d]: tx_bcast_packets" },
{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
8, "[%s]: tpa_aggregations" },
8, "[%d]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
8, "[%s]: tpa_aggregated_frames"},
{ Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
8, "[%d]: tpa_aggregated_frames"},
{ Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%d]: tpa_bytes"},
{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
4, "[%s]: driver_filtered_tx_pkt" }
4, "[%d]: driver_filtered_tx_pkt" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@ -3184,49 +3184,43 @@ static u32 bnx2x_get_private_flags(struct net_device *dev)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
int i, j, k, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
const char *str;
int i, j, start;
switch (stringset) {
case ETH_SS_STATS:
k = 0;
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
snprintf(queue_name, sizeof(queue_name),
"%d", i);
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
snprintf(buf + (k + j)*ETH_GSTRING_LEN,
ETH_GSTRING_LEN,
bnx2x_q_stats_arr[j].string,
queue_name);
k += BNX2X_NUM_Q_STATS;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
str = bnx2x_q_stats_arr[j].string;
ethtool_sprintf(&buf, str, i);
}
}
}
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
for (i = 0; i < BNX2X_NUM_STATS; i++) {
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
j++;
ethtool_puts(&buf, bnx2x_stats_arr[i].string);
}
break;
case ETH_SS_TEST:
if (IS_VF(bp))
break;
/* First 4 tests cannot be done in MF mode */
if (!IS_MF(bp))
start = 0;
else
start = 4;
memcpy(buf, bnx2x_tests_str_arr + start,
ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
for (i = start; i < BNX2X_NUM_TESTS_SF; i++)
ethtool_puts(&buf, bnx2x_tests_str_arr[i]);
break;
case ETH_SS_PRIV_FLAGS:
memcpy(buf, bnx2x_private_arr,
ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
for (i = 0; i < BNX2X_PRI_FLAG_LEN; i++)
ethtool_puts(&buf, bnx2x_private_arr[i]);
break;
}
}
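
The get_strings() rewrite above leans on ethtool_sprintf() and ethtool_puts() advancing the output pointer by ETH_GSTRING_LEN per string, which removes the manual (k + j) * ETH_GSTRING_LEN bookkeeping. A minimal sketch of the pattern, using hypothetical stat names:

#include <linux/kernel.h>
#include <linux/ethtool.h>

/* Illustrative sketch: emit per-queue and global stat strings; both helpers
 * advance 'buf' by ETH_GSTRING_LEN for every string written.
 */
static void example_get_strings(u8 *buf, unsigned int num_queues)
{
	static const char * const q_fmt[] = { "[%d]: rx_bytes", "[%d]: tx_bytes" };
	unsigned int q, s;

	for (q = 0; q < num_queues; q++)
		for (s = 0; s < ARRAY_SIZE(q_fmt); s++)
			ethtool_sprintf(&buf, q_fmt[s], q);

	ethtool_puts(&buf, "link_state_changes");	/* non-queue stat */
}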

View File

@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
@ -3015,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
static void cnic_service_bnx2_msix(struct tasklet_struct *t)
static void cnic_service_bnx2_msix(struct work_struct *work)
{
struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@ -3036,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
tasklet_schedule(&cp->cnic_irq_task);
queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
}
}
@ -3140,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@ -4429,7 +4430,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
tasklet_kill(&cp->cnic_irq_task);
cancel_work_sync(&cp->cnic_irq_bh_work);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
@ -4442,7 +4443,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
tasklet_disable(&cp->cnic_irq_task);
disable_work_sync(&cp->cnic_irq_bh_work);
return err;
}
@ -4465,7 +4466,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@ -4874,7 +4875,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
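
The cnic changes above swap the tasklet for a work item queued on system_bh_wq, which still executes in softirq (BH) context but goes through the workqueue API. A minimal sketch of the conversion, with hypothetical example_* names rather than the cnic structures:

#include <linux/workqueue.h>

/* Illustrative sketch of the tasklet -> BH work item conversion. */
struct example_ctx {
	struct work_struct bh_work;
};

static void example_bh_work(struct work_struct *work)
{
	struct example_ctx *ctx = from_work(ctx, work, bh_work);

	/* former tasklet body runs here, still in BH context */
	(void)ctx;
}

static void example_init(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->bh_work, example_bh_work);	/* was tasklet_setup() */
}

static void example_kick(struct example_ctx *ctx)
{
	queue_work(system_bh_wq, &ctx->bh_work);	/* was tasklet_schedule() */
}

static void example_shutdown(struct example_ctx *ctx)
{
	cancel_work_sync(&ctx->bh_work);		/* was tasklet_kill() */
}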

View File

@ -268,7 +268,7 @@ struct cnic_local {
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
struct tasklet_struct cnic_irq_task;
struct work_struct cnic_irq_bh_work;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];

View File

@ -137,8 +137,6 @@
#define BNX2FC_FW_TIMEOUT (3 * HZ)
#define PORT_MAX 2
#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
/* FC FCP Status */
#define FC_GOOD 0
@ -360,18 +358,12 @@ struct bnx2fc_rport {
dma_addr_t lcq_dma;
u32 lcq_mem_size;
void *ofld_req[4];
dma_addr_t ofld_req_dma[4];
void *enbl_req;
dma_addr_t enbl_req_dma;
spinlock_t tgt_lock;
spinlock_t cq_lock;
atomic_t num_active_ios;
u32 flush_in_prog;
unsigned long timestamp;
unsigned long retry_delay_timestamp;
struct list_head free_task_list;
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
struct list_head active_cmd_queue;
struct list_head els_queue;
@ -386,6 +378,7 @@ struct bnx2fc_rport {
};
struct bnx2fc_mp_req {
u64 tm_lun;
u8 tm_flags;
u32 req_len;
@ -493,7 +486,14 @@ struct bnx2fc_unsol_els {
struct work_struct unsol_els_work;
};
struct bnx2fc_priv {
struct bnx2fc_cmd *io_req;
};
static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd)
{
return scsi_cmd_priv(cmd);
}
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
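
The bnx2fc_priv() accessor above works because the SCSI midlayer allocates per-command driver data when the host template sets .cmd_size, reachable via scsi_cmd_priv(); this is what replaces the removed SCp.ptr usage. A minimal sketch of the pattern with a hypothetical private struct:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Illustrative sketch: per-command private data instead of SCp.ptr. */
struct example_cmd_priv {
	void *io_req;
};

static inline struct example_cmd_priv *example_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);	/* memory allocated by the midlayer */
}

/* In the SCSI host template:
 *	.cmd_size = sizeof(struct example_cmd_priv),
 */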

View File

@ -273,7 +273,6 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct fcoe_port *port;
struct fcoe_hdr *hp;
struct bnx2fc_rport *tgt;
struct fc_stats *stats;
u8 sof, eof;
u32 crc;
unsigned int hlen, tlen, elen;
@ -399,10 +398,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
/*update tx stats */
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
put_cpu();
this_cpu_inc(lport->stats->TxFrames);
this_cpu_add(lport->stats->TxWords, wlen);
/* send down to lld */
fr_dev(fp) = lport;
@ -432,7 +429,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_ctlr *ctlr;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
struct sk_buff *tmp_skb;
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
@ -444,11 +440,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
goto err;
}
tmp_skb = skb_share_check(skb, GFP_ATOMIC);
if (!tmp_skb)
goto err;
skb = tmp_skb;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return -1;
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
@ -512,7 +506,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
u32 fr_len, fr_crc;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fc_stats *stats;
struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
@ -543,10 +536,8 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->RxFrames++;
stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
put_cpu();
this_cpu_inc(lport->stats->RxFrames);
this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
@ -633,9 +624,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
fr_crc = le32_to_cpu(fr_crc(fp));
if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
stats = per_cpu_ptr(lport->stats, get_cpu());
crc_err = (stats->InvalidCRCCount++);
put_cpu();
crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount);
if (crc_err < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
@ -964,9 +953,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) =
FC_PORTTYPE_UNKNOWN;
per_cpu_ptr(lport->stats,
get_cpu())->LinkFailureCount++;
put_cpu();
this_cpu_inc(lport->stats->LinkFailureCount);
fcoe_clean_pending_queue(lport);
wait_for_upload = 1;
}
@ -1747,32 +1734,32 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
switch (pdev->device) {
case PCI_DEVICE_ID_NX2_57710:
strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
strscpy(hba->chip_num, "BCM57710", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57711:
strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
strscpy(hba->chip_num, "BCM57711", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57712:
case PCI_DEVICE_ID_NX2_57712_MF:
case PCI_DEVICE_ID_NX2_57712_VF:
strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
strscpy(hba->chip_num, "BCM57712", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57800:
case PCI_DEVICE_ID_NX2_57800_MF:
case PCI_DEVICE_ID_NX2_57800_VF:
strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
strscpy(hba->chip_num, "BCM57800", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57810:
case PCI_DEVICE_ID_NX2_57810_MF:
case PCI_DEVICE_ID_NX2_57810_VF:
strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
strscpy(hba->chip_num, "BCM57810", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57840:
case PCI_DEVICE_ID_NX2_57840_MF:
case PCI_DEVICE_ID_NX2_57840_VF:
case PCI_DEVICE_ID_NX2_57840_2_20:
case PCI_DEVICE_ID_NX2_57840_4_10:
strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
strscpy(hba->chip_num, "BCM57840", sizeof(hba->chip_num));
break;
default:
pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
@ -1810,7 +1797,7 @@ static int bnx2fc_ulp_get_stats(void *handle)
if (!stats_addr)
return -EINVAL;
strncpy(stats_addr->version, BNX2FC_VERSION,
strscpy(stats_addr->version, BNX2FC_VERSION,
sizeof(stats_addr->version));
stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
@ -2623,14 +2610,11 @@ static int bnx2fc_cpu_online(unsigned int cpu)
p = &per_cpu(bnx2fc_percpu, cpu);
thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
(void *)p, cpu_to_node(cpu),
"bnx2fc_thread/%d", cpu);
thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread,
(void *)p, cpu, "bnx2fc_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
/* bind thread to the cpu */
kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
@ -2972,6 +2956,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.track_queue_depth = 1,
.slave_configure = bnx2fc_slave_configure,
.shost_attrs = bnx2fc_host_attrs,
.cmd_size = sizeof(struct bnx2fc_priv),
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
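
The stats conversions in this file (and in the libfc/fcoe files below) drop the get_cpu()/put_cpu() pair because this_cpu_inc()/this_cpu_add() handle preemption safety internally. A minimal sketch of the writer side:

#include <linux/percpu.h>
#include <scsi/libfc.h>

/* Illustrative sketch: bump per-CPU lport counters without pinning the CPU. */
static void example_count_tx(struct fc_lport *lport, unsigned int wlen)
{
	this_cpu_inc(lport->stats->TxFrames);
	this_cpu_add(lport->stats->TxWords, wlen);
}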

View File

@ -1709,7 +1709,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
struct fcoe_cached_sge_ctx *cached_sge;
struct fcoe_ext_mul_sges_ctx *sgl;
int dev_type = tgt->dev_type;
u64 *fcp_cmnd;
struct fcp_cmnd *fcp_cmnd;
u64 *raw_fcp_cmnd;
u64 tmp_fcp_cmnd[4];
u32 context_id;
int cnt, i;
@ -1778,16 +1779,19 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
/* Fill FCP_CMND IU */
fcp_cmnd = (u64 *)
fcp_cmnd = (struct fcp_cmnd *)&tmp_fcp_cmnd;
bnx2fc_build_fcp_cmnd(io_req, fcp_cmnd);
int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
raw_fcp_cmnd = (u64 *)
task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
/* swap fcp_cmnd */
cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
for (i = 0; i < cnt; i++) {
*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
fcp_cmnd++;
*raw_fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
raw_fcp_cmnd++;
}
/* Rx Write Tx Read */

View File

@ -204,8 +204,8 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
sc_cmd->allowed);
scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
}
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
@ -656,10 +656,9 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
return SUCCESS;
}
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
static int bnx2fc_initiate_tmf(struct fc_lport *lport, struct fc_rport *rport,
u64 tm_lun, u8 tm_flags)
{
struct fc_lport *lport;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rp;
struct fcoe_port *port;
struct bnx2fc_interface *interface;
@ -668,7 +667,6 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
struct bnx2fc_mp_req *tm_req;
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_frame_header *fc_hdr;
struct fcp_cmnd *fcp_cmnd;
int task_idx, index;
@ -677,8 +675,6 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
u32 sid, did;
unsigned long start = jiffies;
lport = shost_priv(host);
rport = starget_to_rport(scsi_target(sc_cmd->device));
port = lport_priv(lport);
interface = port->priv;
@ -689,7 +685,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
}
rp = rport->dd_data;
rc = fc_block_scsi_eh(sc_cmd);
rc = fc_block_rport(rport);
if (rc)
return rc;
@ -718,7 +714,7 @@ retry_tmf:
goto retry_tmf;
}
/* Initialize rest of io_req fields */
io_req->sc_cmd = sc_cmd;
io_req->sc_cmd = NULL;
io_req->port = port;
io_req->tgt = tgt;
@ -736,11 +732,13 @@ retry_tmf:
/* Set TM flags */
io_req->io_req_flags = 0;
tm_req->tm_flags = tm_flags;
tm_req->tm_lun = tm_lun;
/* Fill FCP_CMND */
bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
int_to_scsilun(tm_lun, &fcp_cmnd->fc_lun);
memset(fcp_cmnd->fc_cdb, 0, BNX2FC_MAX_CMD_LEN);
fcp_cmnd->fc_dl = 0;
/* Fill FC header */
@ -763,8 +761,6 @@ retry_tmf:
task = &(task_page[index]);
bnx2fc_init_mp_task(io_req, task);
sc_cmd->SCp.ptr = (char *)io_req;
/* Obtain free SQ entry */
spin_lock_bh(&tgt->tgt_lock);
bnx2fc_add_2_sq(tgt, xid);
@ -1062,7 +1058,10 @@ cleanup_err:
*/
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_lport *lport = shost_priv(rport_to_shost(rport));
return bnx2fc_initiate_tmf(lport, rport, 0, FCP_TMF_TGT_RESET);
}
/**
@ -1075,7 +1074,11 @@ int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
*/
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_lport *lport = shost_priv(rport_to_shost(rport));
return bnx2fc_initiate_tmf(lport, rport, sc_cmd->device->lun,
FCP_TMF_LUN_RESET);
}
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
@ -1145,7 +1148,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
spin_lock_bh(&tgt->tgt_lock);
io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
io_req = bnx2fc_priv(sc_cmd)->io_req;
if (!io_req) {
/* Command might have just completed */
printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
@ -1450,10 +1453,9 @@ io_compl:
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct bnx2fc_rport *tgt = io_req->tgt;
struct bnx2fc_cmd *cmd, *tmp;
u64 tm_lun = sc_cmd->device->lun;
struct bnx2fc_mp_req *tm_req = &io_req->mp_req;
u64 lun;
int rc = 0;
@ -1465,8 +1467,10 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
*/
list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
if (!cmd->sc_cmd)
continue;
lun = cmd->sc_cmd->device->lun;
if (lun == tm_lun) {
if (lun == tm_req->tm_lun) {
/* Initiate ABTS on this cmd */
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
&cmd->req_flags)) {
@ -1570,32 +1574,37 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
fc_hdr->fh_r_ctl);
}
if (!sc_cmd->SCp.ptr) {
printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
return;
}
switch (io_req->fcp_status) {
case FC_GOOD:
if (io_req->cdb_status == 0) {
/* Good IO completion */
sc_cmd->result = DID_OK << 16;
} else {
/* Transport status is good, SCSI status not good */
sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
if (sc_cmd) {
if (!bnx2fc_priv(sc_cmd)->io_req) {
printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
return;
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
break;
switch (io_req->fcp_status) {
case FC_GOOD:
if (io_req->cdb_status == 0) {
/* Good IO completion */
sc_cmd->result = DID_OK << 16;
} else {
/* Transport status is good, SCSI status not good */
sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
break;
default:
BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
io_req->fcp_status);
break;
default:
BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
io_req->fcp_status);
break;
}
sc_cmd = io_req->sc_cmd;
io_req->sc_cmd = NULL;
bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
}
sc_cmd = io_req->sc_cmd;
io_req->sc_cmd = NULL;
/* check if the io_req exists in tgt's tmf_q */
if (io_req->on_tmf_queue) {
@ -1607,9 +1616,6 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
return;
}
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
if (io_req->wait_for_abts_comp) {
BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
@ -1738,15 +1744,9 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
struct fcp_cmnd *fcp_cmnd)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
fcp_cmnd->fc_cmdref = 0;
fcp_cmnd->fc_pri_ta = 0;
fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
@ -1771,8 +1771,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
io_req->fcp_resid = fcp_rsp->fcp_resid;
io_req->scsi_comp_flags = rsp_flags;
CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
fcp_rsp->scsi_status_code;
io_req->cdb_status = fcp_rsp->scsi_status_code;
/* Fetch fcp_rsp_info and fcp_sns_info if available */
if (num_rq) {
@ -1851,7 +1850,7 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
return 0;
}
@ -1944,8 +1943,8 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* parse fcp_rsp and obtain sense data from RQ if available */
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
if (!sc_cmd->SCp.ptr) {
printk(KERN_ERR PFX "SCp.ptr is NULL\n");
if (!bnx2fc_priv(sc_cmd)->io_req) {
printk(KERN_ERR PFX "io_req is NULL\n");
return;
}
@ -2016,8 +2015,8 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
io_req->fcp_status);
break;
}
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
@ -2031,7 +2030,6 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct fc_lport *lport = port->lport;
struct fc_stats *stats;
int task_idx, index;
u16 xid;
@ -2042,22 +2040,20 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
io_req->port = port;
io_req->tgt = tgt;
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
sc_cmd->SCp.ptr = (char *)io_req;
bnx2fc_priv(sc_cmd)->io_req = io_req;
stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
io_req->io_req_flags = BNX2FC_READ;
stats->InputRequests++;
stats->InputBytes += io_req->data_xfer_len;
this_cpu_inc(lport->stats->InputRequests);
this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len);
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
io_req->io_req_flags = BNX2FC_WRITE;
stats->OutputRequests++;
stats->OutputBytes += io_req->data_xfer_len;
this_cpu_inc(lport->stats->OutputRequests);
this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len);
} else {
io_req->io_req_flags = 0;
stats->ControlRequests++;
this_cpu_inc(lport->stats->ControlRequests);
}
put_cpu();
xid = io_req->xid;
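
With the TMF path no longer carrying a scsi_cmnd, the FCP_CMND for a task-management request is built from the LUN and TM flags alone. A minimal sketch of that construction; the helper name is illustrative, not part of the driver:

#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/fc/fc_fcp.h>

/* Illustrative sketch: a task-management FCP_CMND carries no CDB or data length. */
static void example_build_tmf_cmnd(struct fcp_cmnd *fcp_cmnd, u64 lun, u8 tm_flags)
{
	memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fc_lun);
	fcp_cmnd->fc_tm_flags = tm_flags;
	fcp_cmnd->fc_dl = 0;
}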

View File

@ -128,10 +128,8 @@ retry_ofld:
BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
"retry ofld..%d\n", i++);
msleep_interruptible(1000);
if (i > 3) {
i = 0;
if (i > 3)
goto ofld_err;
}
goto retry_ofld;
}
goto ofld_err;

View File

@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.track_queue_depth = 1,
.cmd_size = sizeof(struct libfc_cmd_priv),
};
/**
@ -307,7 +308,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
}
/* Do not support for bonding device */
if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
if (netif_is_bond_master(netdev)) {
FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
return -EOPNOTSUPP;
}
@ -1433,8 +1434,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
return NET_RX_SUCCESS;
err:
per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
put_cpu();
this_cpu_inc(lport->stats->ErrorFrames);
err2:
kfree_skb(skb);
return NET_RX_DROP;
@ -1452,9 +1452,10 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
struct fcoe_percpu_s *fps;
int rc;
fps = &get_cpu_var(fcoe_percpu);
local_lock(&fcoe_percpu.lock);
fps = this_cpu_ptr(&fcoe_percpu);
rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
put_cpu_var(fcoe_percpu);
local_unlock(&fcoe_percpu.lock);
return rc;
}
@ -1473,7 +1474,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct ethhdr *eh;
struct fcoe_crc_eof *cp;
struct sk_buff *skb;
struct fc_stats *stats;
struct fc_frame_header *fh;
unsigned int hlen; /* header length implies the version */
unsigned int tlen; /* trailer length */
@ -1488,7 +1488,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
wlen = skb->len / FCOE_WORD_TO_BYTE;
if (!lport->link_up) {
kfree_skb(skb);
@ -1584,10 +1583,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_shinfo(skb)->gso_size = 0;
}
/* update tx stats: regardless if LLD fails */
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
put_cpu();
this_cpu_inc(lport->stats->TxFrames);
this_cpu_add(lport->stats->TxWords, wlen);
/* send down to lld */
fr_dev(fp) = lport;
@ -1609,7 +1606,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
struct fc_stats *stats;
/*
* We only check CRC if no offload is available and if it is
@ -1639,11 +1635,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
}
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
put_cpu();
return -EINVAL;
}
@ -1656,7 +1649,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
u32 fr_len;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fc_stats *stats;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_hdr *hp;
@ -1684,9 +1676,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
stats = per_cpu_ptr(lport->stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
struct fc_stats *stats;
stats = per_cpu_ptr(lport->stats, raw_smp_processor_id());
if (READ_ONCE(stats->ErrorFrames) < 5)
printk(KERN_WARNING "fcoe: FCoE version "
"mismatch: The frame has "
"version %x, but the "
@ -1699,8 +1693,8 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
stats->RxFrames++;
stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
this_cpu_inc(lport->stats->RxFrames);
this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
@ -1716,13 +1710,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
put_cpu();
fc_exch_recv(lport, fp);
return;
}
drop:
stats->ErrorFrames++;
put_cpu();
this_cpu_inc(lport->stats->ErrorFrames);
kfree_skb(skb);
}
@ -1846,7 +1838,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_stats *stats;
u32 link_possible = 1;
u32 mfs;
int rc = NOTIFY_OK;
@ -1920,9 +1911,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case FCOE_CTLR_ENABLED:
case FCOE_CTLR_UNUSED:
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->LinkFailureCount++;
put_cpu();
this_cpu_inc(lport->stats->LinkFailureCount);
fcoe_clean_pending_queue(lport);
}
}
@ -2487,6 +2476,7 @@ static int __init fcoe_init(void)
p = per_cpu_ptr(&fcoe_percpu, cpu);
INIT_WORK(&p->work, fcoe_receive_work);
skb_queue_head_init(&p->fcoe_rx_list);
local_lock_init(&p->lock);
}
/* Setup link change notification */
@ -2501,6 +2491,7 @@ static int __init fcoe_init(void)
out_free:
mutex_unlock(&fcoe_config_mutex);
fcoe_transport_detach(&fcoe_sw_transport);
out_destroy:
destroy_workqueue(fcoe_wq);
return rc;
@ -2579,7 +2570,7 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
/* pre-FIP */
if (is_zero_ether_addr(mac))
fcoe_ctlr_recv_flogi(fip, lport, fp);
if (!is_zero_ether_addr(mac))
else
fcoe_update_src_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
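
The get_cpu_var()/put_cpu_var() pair around the per-CPU CRC/EOF page is replaced with a local_lock_t embedded in the per-CPU struct, which also keeps the section correct on PREEMPT_RT. A minimal sketch of the pattern with hypothetical names:

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Illustrative sketch: protect per-CPU scratch state with a local_lock_t. */
struct example_pcpu {
	local_lock_t lock;
	int scratch;
};

static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void example_touch_scratch(void)
{
	struct example_pcpu *p;

	local_lock(&example_pcpu.lock);
	p = this_cpu_ptr(&example_pcpu);
	p->scratch++;
	local_unlock(&example_pcpu.lock);
}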

View File

@ -824,22 +824,21 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long deadline;
unsigned long sel_time = 0;
struct list_head del_list;
struct fc_stats *stats;
INIT_LIST_HEAD(&del_list);
stats = per_cpu_ptr(fip->lp->stats, get_cpu());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
if (fip->sel_fcf == fcf) {
if (time_after(jiffies, deadline)) {
stats->MissDiscAdvCount++;
u64 miss_cnt;
miss_cnt = this_cpu_inc_return(fip->lp->stats->MissDiscAdvCount);
printk(KERN_INFO "libfcoe: host%d: "
"Missing Discovery Advertisement "
"for fab %16.16llx count %lld\n",
fip->lp->host->host_no, fcf->fabric_name,
stats->MissDiscAdvCount);
miss_cnt);
} else if (time_after(next_timer, deadline))
next_timer = deadline;
}
@ -855,7 +854,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
*/
list_del(&fcf->list);
list_add(&fcf->list, &del_list);
stats->VLinkFailureCount++;
this_cpu_inc(fip->lp->stats->VLinkFailureCount);
} else {
if (time_after(next_timer, deadline))
next_timer = deadline;
@ -864,7 +863,6 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
sel_time = fcf->time;
}
}
put_cpu();
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
@ -1142,7 +1140,6 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fip_desc *desc;
struct fip_encaps *els;
struct fcoe_fcf *sel;
struct fc_stats *stats;
enum fip_desc_type els_dtype = 0;
u8 els_op;
u8 sub;
@ -1286,10 +1283,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
fr_dev(fp) = lport;
fr_encaps(fp) = els_dtype;
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->RxFrames++;
stats->RxWords += skb->len / FIP_BPW;
put_cpu();
this_cpu_inc(lport->stats->RxFrames);
this_cpu_add(lport->stats->RxWords, skb->len / FIP_BPW);
fc_exch_recv(lport, fp);
return;
@ -1427,9 +1422,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
ntoh24(vp->fd_fc_id));
if (vn_port && (vn_port == lport)) {
mutex_lock(&fip->ctlr_mutex);
per_cpu_ptr(lport->stats,
get_cpu())->VLinkFailureCount++;
put_cpu();
this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
}
@ -1457,8 +1450,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* followed by physical port
*/
mutex_lock(&fip->ctlr_mutex);
per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
put_cpu();
this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);

View File

@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <scsi/fcoe_sysfs.h>
#include <scsi/libfcoe.h>
@ -210,25 +211,13 @@ static const char *get_fcoe_##title##_name(enum table_type table_key) \
return table[table_key]; \
}
static char *fip_conn_type_names[] = {
static const char * const fip_conn_type_names[] = {
[ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
[ FIP_CONN_TYPE_FABRIC ] = "Fabric",
[ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
};
fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
static enum fip_conn_type fcoe_parse_mode(const char *buf)
{
int i;
for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
return i;
}
return FIP_CONN_TYPE_UNKNOWN;
}
static char *fcf_state_names[] = {
[ FCOE_FCF_STATE_UNKNOWN ] = "Unknown",
[ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
@ -270,17 +259,11 @@ static ssize_t store_ctlr_mode(struct device *dev,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
char mode[FCOE_MAX_MODENAME_LEN + 1];
int res;
if (count > FCOE_MAX_MODENAME_LEN)
return -EINVAL;
strncpy(mode, buf, count);
if (mode[count - 1] == '\n')
mode[count - 1] = '\0';
else
mode[count] = '\0';
switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED:
@ -293,12 +276,13 @@ static ssize_t store_ctlr_mode(struct device *dev,
return -ENOTSUPP;
}
ctlr->mode = fcoe_parse_mode(mode);
if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
res = sysfs_match_string(fip_conn_type_names, buf);
if (res < 0 || res == FIP_CONN_TYPE_UNKNOWN) {
LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
buf);
return -EINVAL;
}
ctlr->mode = res;
ctlr->f->set_fcoe_ctlr_mode(ctlr);
LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
@ -609,7 +593,7 @@ static const struct attribute_group *fcoe_fcf_attr_groups[] = {
NULL,
};
static struct bus_type fcoe_bus_type;
static const struct bus_type fcoe_bus_type;
static int fcoe_bus_match(struct device *dev,
struct device_driver *drv)
@ -676,7 +660,7 @@ static struct attribute *fcoe_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(fcoe_bus);
static struct bus_type fcoe_bus_type = {
static const struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
.bus_groups = fcoe_bus_groups,
@ -822,14 +806,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
error = device_register(&ctlr->dev);
if (error)
goto out_del_q2;
if (error) {
destroy_workqueue(ctlr->devloss_work_q);
destroy_workqueue(ctlr->work_q);
put_device(&ctlr->dev);
return NULL;
}
return ctlr;
out_del_q2:
destroy_workqueue(ctlr->devloss_work_q);
ctlr->devloss_work_q = NULL;
out_del_q:
destroy_workqueue(ctlr->work_q);
ctlr->work_q = NULL;
@ -1028,16 +1013,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
fcf->selected = new_fcf->selected;
error = device_register(&fcf->dev);
if (error)
goto out_del;
if (error) {
put_device(&fcf->dev);
goto out;
}
fcf->state = FCOE_FCF_STATE_CONNECTED;
list_add_tail(&fcf->peers, &ctlr->fcfs);
return fcf;
out_del:
kfree(fcf);
out:
return NULL;
}
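
The open-coded fcoe_parse_mode() loop and the mode[] copy go away because sysfs_match_string() compares the store buffer against the table directly (tolerating a trailing newline) and returns the matching index or a negative errno. A minimal sketch with a hypothetical table:

#include <linux/string.h>

/* Illustrative sketch: map a sysfs store buffer onto an enum-indexed table. */
static const char * const example_modes[] = { "Unknown", "Fabric", "VN2VN" };

static int example_parse_mode(const char *buf)
{
	return sysfs_match_string(example_modes, buf);	/* index or -EINVAL */
}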

View File

@ -183,9 +183,9 @@ void __fcoe_get_lesb(struct fc_lport *lport,
memset(lesb, 0, sizeof(*lesb));
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(lport->stats, cpu);
lfc += stats->LinkFailureCount;
vlfc += stats->VLinkFailureCount;
mdac += stats->MissDiscAdvCount;
lfc += READ_ONCE(stats->LinkFailureCount);
vlfc += READ_ONCE(stats->VLinkFailureCount);
mdac += READ_ONCE(stats->MissDiscAdvCount);
}
lesb->lesb_link_fail = htonl(lfc);
lesb->lesb_vlink_fail = htonl(vlfc);
@ -711,7 +711,7 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
char ifname[IFNAMSIZ + 2];
if (buffer) {
strlcpy(ifname, buffer, IFNAMSIZ);
strscpy(ifname, buffer, IFNAMSIZ);
cp = ifname + strlen(ifname);
while (--cp >= ifname && *cp == '\n')
*cp = '\0';

View File

@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <asm/unaligned.h>
@ -75,7 +75,6 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
struct fc_seq_els_data rjt_data;
unsigned int len;
int redisc = 0;
enum fc_els_rscn_ev_qual ev_qual;
enum fc_els_rscn_addr_fmt fmt;
LIST_HEAD(disc_ports);
struct fc_disc_port *dp, *next;
@ -107,8 +106,6 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
goto reject;
for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
ev_qual &= ELS_RSCN_EV_QUAL_MASK;
fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
fmt &= ELS_RSCN_ADDR_FMT_MASK;
/*

View File

@ -136,22 +136,24 @@ static inline int fc_ct_ns_fill(struct fc_lport *lport,
break;
case FC_NS_RSPN_ID:
len = strnlen(fc_host_symbolic_name(lport->host), 255);
len = strnlen(fc_host_symbolic_name(lport->host),
FC_SYMBOLIC_NAME_SIZE);
ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
FC_FST_DIR, FC_NS_SUBTYPE);
hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
strncpy(ct->payload.spn.fr_name,
fc_host_symbolic_name(lport->host), len);
memcpy(ct->payload.spn.fr_name,
fc_host_symbolic_name(lport->host), len);
ct->payload.spn.fr_name_len = len;
break;
case FC_NS_RSNN_NN:
len = strnlen(fc_host_symbolic_name(lport->host), 255);
len = strnlen(fc_host_symbolic_name(lport->host),
FC_SYMBOLIC_NAME_SIZE);
ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
FC_FST_DIR, FC_NS_SUBTYPE);
put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
strncpy(ct->payload.snn.fr_name,
fc_host_symbolic_name(lport->host), len);
memcpy(ct->payload.snn.fr_name,
fc_host_symbolic_name(lport->host), len);
ct->payload.snn.fr_name_len = len;
break;
@ -246,7 +248,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
&entry->type);
put_unaligned_be16(len, &entry->len);
put_unaligned_be64(lport->wwnn,
(__be64 *)&entry->value[0]);
(__be64 *)&entry->value);
/* Manufacturer */
entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +

View File

@ -45,14 +45,10 @@ static struct kmem_cache *scsi_pkt_cachep;
#define FC_SRB_READ (1 << 1)
#define FC_SRB_WRITE (1 << 0)
/*
* The SCp.ptr should be tested and set under the scsi_pkt_queue lock
*/
#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
static struct libfc_cmd_priv *libfc_priv(struct scsi_cmnd *cmd)
{
return scsi_cmd_priv(cmd);
}
/**
* struct fc_fcp_internal - FCP layer internal data
@ -147,8 +143,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
} else {
per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
put_cpu();
this_cpu_inc(lport->stats->FcpPktAllocFails);
}
return fsp;
}
@ -270,8 +265,12 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
if (!fsp->seq_ptr)
return -EINVAL;
per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
put_cpu();
if (fsp->state & FC_SRB_ABORT_PENDING) {
FC_FCP_DBG(fsp, "abort already pending\n");
return -EBUSY;
}
this_cpu_inc(fsp->lp->stats->FcpPktAborts);
fsp->state |= FC_SRB_ABORT_PENDING;
rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
@ -440,8 +439,7 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
if (likely(fp))
return fp;
per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
put_cpu();
this_cpu_inc(lport->stats->FcpFrameAllocFails);
/* error case */
fc_fcp_can_queue_ramp_down(lport);
shost_printk(KERN_ERR, lport->host,
@ -475,7 +473,6 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
struct scsi_cmnd *sc = fsp->cmd;
struct fc_lport *lport = fsp->lp;
struct fc_stats *stats;
struct fc_frame_header *fh;
size_t start_offset;
size_t offset;
@ -537,14 +534,12 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->ErrorFrames++;
this_cpu_inc(lport->stats->ErrorFrames);
/* per cpu count, not total count, but OK for limit */
if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT)
printk(KERN_WARNING "libfc: CRC error on data "
"frame for port (%6.6x)\n",
lport->port_id);
put_cpu();
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
@ -1137,7 +1132,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
unsigned long flags;
int rc;
fsp->cmd->SCp.ptr = (char *)fsp;
libfc_priv(fsp->cmd)->fsp = fsp;
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
@ -1150,7 +1145,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
if (unlikely(rc)) {
spin_lock_irqsave(&si->scsi_queue_lock, flags);
fsp->cmd->SCp.ptr = NULL;
libfc_priv(fsp->cmd)->fsp = NULL;
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
@ -1681,7 +1676,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_ERROR);
fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
}
fc_fcp_unlock_pkt(fsp);
@ -1700,11 +1695,12 @@ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
/*
* if this fails then we let the scsi command timer fire and
* scsi-ml escalate.
*/
fc_fcp_send_abort(fsp);
if (!fsp->cmd)
/*
* Only abort non-scsi commands; otherwise let the
* scsi command timer fire and scsi-ml escalate.
*/
fc_fcp_send_abort(fsp);
}
/**
@ -1865,12 +1861,11 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
struct fc_fcp_pkt *fsp;
int rval;
int rc = 0;
struct fc_stats *stats;
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
return 0;
}
@ -1880,7 +1875,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
goto out;
}
@ -1917,20 +1912,18 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
/*
* setup the data direction
*/
stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
stats->InputRequests++;
stats->InputBytes += fsp->data_len;
this_cpu_inc(lport->stats->InputRequests);
this_cpu_add(lport->stats->InputBytes, fsp->data_len);
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
fsp->req_flags = FC_SRB_WRITE;
stats->OutputRequests++;
stats->OutputBytes += fsp->data_len;
this_cpu_inc(lport->stats->OutputRequests);
this_cpu_add(lport->stats->OutputBytes, fsp->data_len);
} else {
fsp->req_flags = 0;
stats->ControlRequests++;
this_cpu_inc(lport->stats->ControlRequests);
}
put_cpu();
/*
* send it to the lower layer
@ -1983,7 +1976,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
fc_fcp_can_queue_ramp_up(lport);
sc_cmd = fsp->cmd;
CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
libfc_priv(sc_cmd)->status = fsp->cdb_status;
switch (fsp->status_code) {
case FC_COMPLETE:
if (fsp->cdb_status == 0) {
@ -1992,7 +1985,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
*/
sc_cmd->result = DID_OK << 16;
if (fsp->scsi_resid)
CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
} else {
/*
* transport level I/O was ok but scsi
@ -2025,7 +2018,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
*/
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_DATA_UNDRUN (scsi)\n");
CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
break;
@ -2069,9 +2062,9 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_PARITY << 16);
break;
case FC_TIMED_OUT:
FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
"due to FC_TIMED_OUT\n");
sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
sc_cmd->result = (DID_TIME_OUT << 16);
break;
default:
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
@ -2085,9 +2078,9 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
sc_cmd->SCp.ptr = NULL;
libfc_priv(sc_cmd)->fsp = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
/* release ref from initial allocation in queue command */
fc_fcp_pkt_release(fsp);
@ -2121,7 +2114,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
si = fc_get_scsi_internal(lport);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
fsp = CMD_SP(sc_cmd);
fsp = libfc_priv(sc_cmd)->fsp;
if (!fsp) {
/* command completed while scsi eh was setting up */
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

View File

@ -314,21 +314,21 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
stats = per_cpu_ptr(lport->stats, cpu);
fc_stats->tx_frames += stats->TxFrames;
fc_stats->tx_words += stats->TxWords;
fc_stats->rx_frames += stats->RxFrames;
fc_stats->rx_words += stats->RxWords;
fc_stats->error_frames += stats->ErrorFrames;
fc_stats->invalid_crc_count += stats->InvalidCRCCount;
fc_stats->fcp_input_requests += stats->InputRequests;
fc_stats->fcp_output_requests += stats->OutputRequests;
fc_stats->fcp_control_requests += stats->ControlRequests;
fcp_in_bytes += stats->InputBytes;
fcp_out_bytes += stats->OutputBytes;
fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
fc_stats->link_failure_count += stats->LinkFailureCount;
fc_stats->tx_frames += READ_ONCE(stats->TxFrames);
fc_stats->tx_words += READ_ONCE(stats->TxWords);
fc_stats->rx_frames += READ_ONCE(stats->RxFrames);
fc_stats->rx_words += READ_ONCE(stats->RxWords);
fc_stats->error_frames += READ_ONCE(stats->ErrorFrames);
fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount);
fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests);
fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests);
fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests);
fcp_in_bytes += READ_ONCE(stats->InputBytes);
fcp_out_bytes += READ_ONCE(stats->OutputBytes);
fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails);
fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts);
fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails);
fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount);
}
fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
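
Because the writers elsewhere in this series now use this_cpu_*() with no lock, the aggregation loop reads each counter with READ_ONCE() so the compiler cannot tear or re-read the load. A minimal sketch of the reader side for one counter:

#include <linux/percpu.h>
#include <scsi/libfc.h>

/* Illustrative sketch: sum a lockless per-CPU counter across all CPUs. */
static u64 example_sum_tx_frames(struct fc_lport *lport)
{
	u64 total = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct fc_stats *s = per_cpu_ptr(lport->stats, cpu);

		total += READ_ONCE(s->TxFrames);
	}
	return total;
}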

View File

@ -1079,7 +1079,6 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
u32 crc;
unsigned int hlen, tlen, elen;
int wlen;
struct fc_stats *stats;
struct fc_lport *tmp_lport;
struct fc_lport *vn_port = NULL;
struct qedf_rport *fcport;
@ -1227,10 +1226,8 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
hp->fcoe_sof = sof;
/*update tx stats */
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
put_cpu();
this_cpu_inc(lport->stats->TxFrames);
this_cpu_add(lport->stats->TxWords, wlen);
/* Get VLAN ID from skb for printing purposes */
__vlan_hwaccel_get_tag(skb, &vlan_tci);

View File

@ -158,7 +158,7 @@ struct fc_fdmi_port_name {
struct fc_fdmi_attr_entry {
__be16 type;
__be16 len;
__u8 value[1];
__u8 value[];
} __attribute__((__packed__));
/*
@ -166,7 +166,7 @@ struct fc_fdmi_attr_entry {
*/
struct fs_fdmi_attrs {
__be32 numattrs;
struct fc_fdmi_attr_entry attr[1];
struct fc_fdmi_attr_entry attr[];
} __attribute__((__packed__));
/*
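
Turning value[1] and attr[1] into flexible array members follows the current kernel idiom: sizeof() then covers only the fixed header, with the payload allocated and addressed past it. A tiny sketch of the sizing rule with a hypothetical struct:

#include <linux/types.h>

/* Illustrative sketch: sizeof() covers only the fixed header. */
struct example_attr {
	__be16 type;
	__be16 len;
	__u8   value[];		/* flexible array member */
} __attribute__((__packed__));

/* sizeof(struct example_attr) == 4; allocate sizeof() + payload bytes. */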

View File

@ -44,11 +44,16 @@
* @LPORT_ST_DISABLED: Disabled
* @LPORT_ST_FLOGI: Fabric login (FLOGI) sent
* @LPORT_ST_DNS: Waiting for name server remote port to become ready
* @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent
* @LPORT_ST_RNN_ID: Register port name by ID (RNN_ID) sent
* @LPORT_ST_RSNN_NN: Waiting for host symbolic node name
* @LPORT_ST_RSPN_ID: Waiting for host symbolic port name
* @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent
* @LPORT_ST_RFF_ID: Register FC-4 Features by ID (RFF_ID) sent
* @LPORT_ST_FDMI: Waiting for mgmt server rport to become ready
* @LPORT_ST_RHBA:
* @LPORT_ST_RHBA: Register HBA
* @LPORT_ST_RPA: Register Port Attributes
* @LPORT_ST_DHBA: Deregister HBA
* @LPORT_ST_DPRT: Deregister Port
* @LPORT_ST_SCR: State Change Register (SCR) sent
* @LPORT_ST_READY: Ready for use
* @LPORT_ST_LOGO: Local port logout (LOGO) sent
@ -183,7 +188,7 @@ struct fc_rport_libfc_priv {
* @r_a_tov: Resource allocation timeout value (in msec)
* @rp_mutex: The mutex that protects the remote port
* @retry_work: Handle for retries
* @event_callback: Callback when READY, FAILED or LOGO states complete
* @lld_event_callback: Callback when READY, FAILED or LOGO states complete
* @prli_count: Count of open PRLI sessions in providers
* @rcu: Structure used for freeing in an RCU-safe manner
*/
@ -289,6 +294,7 @@ struct fc_seq_els_data {
* @timer: The command timer
* @tm_done: Completion indicator
* @wait_for_comp: Indicator to wait for completion of the I/O (in jiffies)
* @timer_delay: FCP packet timer delay in jiffies
* @data_len: The length of the data
* @cdb_cmd: The CDB command
* @xfer_len: The transfer length
@ -351,6 +357,15 @@ struct fc_fcp_pkt {
struct completion tm_done;
} ____cacheline_aligned_in_smp;
/*
* @fsp should be tested and set under the scsi_pkt_queue lock
*/
struct libfc_cmd_priv {
struct fc_fcp_pkt *fsp;
u32 resid_len;
u8 status;
};
/*
* Structure and function definitions for managing Fibre Channel Exchanges
* and Sequences
@ -779,6 +794,8 @@ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *);
/**
* fc_lport_test_ready() - Determine if a local port is in the READY state
* @lport: The local port to test
*
* Returns: %true if local port is in the READY state, %false otherwise
*/
static inline int fc_lport_test_ready(struct fc_lport *lport)
{
@ -821,6 +838,8 @@ static inline void fc_lport_state_enter(struct fc_lport *lport,
/**
* fc_lport_init_stats() - Allocate per-CPU statistics for a local port
* @lport: The local port whose statistics are to be initialized
*
* Returns: %0 on success, %-ENOMEM on failure
*/
static inline int fc_lport_init_stats(struct fc_lport *lport)
{
@ -842,6 +861,8 @@ static inline void fc_lport_free_stats(struct fc_lport *lport)
/**
* lport_priv() - Return the private data from a local port
* @lport: The local port whose private data is to be retrieved
*
* Returns: the local port's private data pointer
*/
static inline void *lport_priv(const struct fc_lport *lport)
{

View File

@ -14,7 +14,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/local_lock.h>
#include <linux/prandom.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fcoe_sysfs.h>
@ -333,6 +334,7 @@ struct fcoe_percpu_s {
struct sk_buff_head fcoe_rx_list;
struct page *crc_eof_page;
int crc_eof_offset;
local_lock_t lock;
};
/**