[SCSI] lpfc 8.3.5: fix reset path, ELS ordering and discovery issues
This patch includes the following fixes:
- Fixed panic during HBA reset.
- Fixed FCoE event tag passed in resume_rpi.
- Fixed out-of-order ELS commands.
- Fixed discovery issues found during VLAN testing.
- Fixed UNREG_VPI failure on extended link pull.
- Fixed crash while processing unsolicited FC frames.
- Cleared the retry count in the delayed ELS handler.
- Fixed discovery failure during a quick link bounce.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent 1796e72291
commit 4d9ab994e2
9 changed files with 254 additions and 232 deletions
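The out-of-order ELS fix and the unsolicited-frame fixes in this patch rest on one structural change: ELS response IOCBs and received FC frames now embed a common lpfc_cq_event node, the interrupt path queues that node on the single sp_rspiocb_work_queue list, and the worker thread uses container_of() plus the CQE code to recover the enclosing object and dispatch it. The sketch below is a minimal, self-contained illustration of that dispatch pattern only; it is not lpfc code, and the simplified names (cq_event, demo_iocbq, demo_dmabuf, the hand-rolled singly linked list) are assumptions made for brevity.

/*
 * Minimal sketch (not lpfc code) of the cq_event dispatch pattern:
 * two different object types embed the same node, are queued on one
 * list, and the consumer recovers the container with container_of().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum cqe_code { CQE_CODE_COMPL_WQE, CQE_CODE_RECEIVE };

struct cq_event {                 /* common node embedded in both objects */
	struct cq_event *next;
	enum cqe_code code;
};

struct demo_iocbq {               /* stands in for struct lpfc_iocbq */
	int iotag;
	struct cq_event cq_event;
};

struct demo_dmabuf {              /* stands in for struct hbq_dmabuf */
	void *frame;
	struct cq_event cq_event;
};

static void handle_work_queue(struct cq_event *head)
{
	/* Drain the shared queue in arrival order and dispatch by CQE code */
	for (struct cq_event *ev = head; ev; ev = ev->next) {
		switch (ev->code) {
		case CQE_CODE_COMPL_WQE: {
			struct demo_iocbq *iocbq =
				container_of(ev, struct demo_iocbq, cq_event);
			printf("completion iotag %d\n", iocbq->iotag);
			break;
		}
		case CQE_CODE_RECEIVE: {
			struct demo_dmabuf *buf =
				container_of(ev, struct demo_dmabuf, cq_event);
			printf("received frame %p\n", buf->frame);
			break;
		}
		}
	}
}

int main(void)
{
	struct demo_iocbq io = { .iotag = 7,
				 .cq_event = { .code = CQE_CODE_COMPL_WQE } };
	struct demo_dmabuf rx = { .frame = &io,
				  .cq_event = { .code = CQE_CODE_RECEIVE } };

	io.cq_event.next = &rx.cq_event;   /* queue in arrival order */
	rx.cq_event.next = NULL;
	handle_work_queue(&io.cq_event);
	return 0;
}

Because completions and received frames share one queue and one drain loop, the worker handles them in the order the hardware reported them, which is what restores ELS ordering in the real driver.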
@@ -109,7 +109,7 @@ struct hbq_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
-	struct lpfc_rcqe rcqe;
+	struct lpfc_cq_event cq_event;
 };
 
 /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -551,6 +551,7 @@ struct lpfc_hba {
 	uint8_t  fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
+	uint32_t link_events;
 
 	/* These fields used to be binfo */
 	uint32_t fc_pref_DID;	/* preferred D_ID */
@@ -3815,7 +3815,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count -= lso->invalid_crc_count;
 	hs->error_frames -= lso->error_frames;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->hba_flag & HBA_FCOE_SUPPORT) {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->link_events >> 1);
+		hs->nos_count -= lso->link_events;
+	} else if (phba->fc_topology == TOPOLOGY_LOOP) {
 		hs->lip_count = (phba->fc_eventTag >> 1);
 		hs->lip_count -= lso->link_events;
 		hs->nos_count = -1;
@@ -3906,7 +3910,10 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
-	lso->link_events = (phba->fc_eventTag >> 1);
+	if (phba->hba_flag & HBA_FCOE_SUPPORT)
+		lso->link_events = (phba->link_events >> 1);
+	else
+		lso->link_events = (phba->fc_eventTag >> 1);
 
 	psli->stats_start = get_seconds();
 
@@ -235,7 +235,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
 void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
-int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
+void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
@@ -2452,6 +2452,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 	 */
 	del_timer_sync(&ndlp->nlp_delayfunc);
 	retry = ndlp->nlp_retry;
+	ndlp->nlp_retry = 0;
 
 	switch (cmd) {
 	case ELS_CMD_FLOGI:
@@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba)
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 		}
-		if (phba->hba_flag & HBA_RECEIVE_BUFFER)
-			lpfc_sli4_handle_received_buffer(phba);
 	}
 
 	vports = lpfc_create_vport_work_array(phba);
@@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba)
 	pring = &phba->sli.ring[LPFC_ELS_RING];
 	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
 	status >>= (4*LPFC_ELS_RING);
-	if ((status & HA_RXMASK)
-		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+	if ((status & HA_RXMASK) ||
+	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
+	    (phba->hba_flag & HBA_RECEIVE_BUFFER)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
 			/* Set the lpfc data pending flag */
@@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 			lpfc_unreg_rpi(vport, ndlp);
 
 		/* Leave Fabric nodes alone on link down */
-		if (!remove && ndlp->nlp_type & NLP_FABRIC)
+		if ((phba->sli_rev < LPFC_SLI_REV4) &&
+		    (!remove && ndlp->nlp_type & NLP_FABRIC))
 			continue;
 		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
 					     remove
@@ -1015,10 +1015,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		mempool_free(mboxq, phba->mbox_mem_pool);
 		return;
 	}
+	phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 	if (vport->port_state != LPFC_FLOGI) {
 		spin_lock_irqsave(&phba->hbalock, flags);
-		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_initial_flogi(vport);
 	}
@@ -1199,6 +1199,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	/* If the FCF is not availabe do nothing. */
 	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
@@ -1216,15 +1217,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
 		GFP_KERNEL);
-	if (!fcf_mbxq)
+	if (!fcf_mbxq) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
+	}
 
 	lpfc_reg_fcfi(phba, fcf_mbxq);
 	fcf_mbxq->vport = phba->pport;
 	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
 	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED)
+	if (rc == MBX_NOT_FINISHED) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+	}
 
 	return;
 }
@@ -1253,6 +1262,20 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 			uint16_t *vlan_id)
 {
 	struct lpfc_fcf_conn_entry *conn_entry;
+	int i, j, fcf_vlan_id = 0;
+
+	/* Find the lowest VLAN id in the FCF record */
+	for (i = 0; i < 512; i++) {
+		if (new_fcf_record->vlan_bitmap[i]) {
+			fcf_vlan_id = i * 8;
+			j = 0;
+			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+				j++;
+				fcf_vlan_id++;
+			}
+			break;
+		}
+	}
 
 	/* If FCF not available return 0 */
 	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
@@ -1286,7 +1309,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		if (*addr_mode & LPFC_FCF_FPMA)
 			*addr_mode = LPFC_FCF_FPMA;
 
-		*vlan_id = 0xFFFF;
+		/* If FCF record report a vlan id use that vlan id */
+		if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = 0xFFFF;
 		return 1;
 	}
 
@@ -1384,8 +1411,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		    (*addr_mode & LPFC_FCF_FPMA))
 			*addr_mode = LPFC_FCF_FPMA;
 
+		/* If matching connect list has a vlan id, use it */
 		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
 			*vlan_id = conn_entry->conn_rec.vlan_tag;
+		/*
+		 * If no vlan id is specified in connect list, use the vlan id
+		 * in the FCF record
+		 */
+		else if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
 		else
 			*vlan_id = 0xFFFF;
 
@@ -1423,6 +1457,12 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 
 	if (phba->link_state >= LPFC_LINK_UP)
 		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	else
+		/*
+		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+		 * flag
+		 */
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 
 	if (unreg_fcf) {
 		spin_lock_irq(&phba->hbalock);
@@ -2085,6 +2125,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	else
 		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
+	phba->link_events++;
 	if (la->attType == AT_LINK_UP && (!la->mm)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -4409,6 +4450,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	if (lpfc_fcf_inuse(phba))
 		return;
 
+	/* At this point, all discovery is aborted */
+	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 
 	/* Unregister VPIs */
 	vports = lpfc_create_vport_work_array(phba);
@@ -4512,8 +4555,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
 	record_count = conn_hdr->length * sizeof(uint32_t)/
@@ -2919,6 +2919,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
 	int rc;
 
+	phba->fc_eventTag = acqe_fcoe->event_tag;
 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
@@ -2990,6 +2991,7 @@ static void
 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
 			 struct lpfc_acqe_dcbx *acqe_dcbx)
 {
+	phba->fc_eventTag = acqe_dcbx->event_tag;
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0290 The SLI4 DCBX asynchronous event is not "
 			"handled yet\n");
@@ -3594,8 +3596,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	return;
 }
@@ -5058,15 +5062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	}
 	phba->sli4_hba.els_cq = qdesc;
 
-	/* Create slow-path Unsolicited Receive Complete Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
-	if (!qdesc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0502 Failed allocate slow-path USOL RX CQ\n");
-		goto out_free_els_cq;
-	}
-	phba->sli4_hba.rxq_cq = qdesc;
 
 	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
 	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5070,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2577 Failed allocate memory for fast-path "
 				"CQ record array\n");
-		goto out_free_rxq_cq;
+		goto out_free_els_cq;
 	}
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5183,6 @@ out_free_fcp_cq:
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_cq);
-out_free_rxq_cq:
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
 out_free_els_cq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5247,10 +5239,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
 	phba->sli4_hba.dat_rq = NULL;
 
-	/* Release unsolicited receive complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
-
 	/* Release ELS complete queue */
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5383,25 +5371,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.els_cq->queue_id,
 			phba->sli4_hba.sp_eq->queue_id);
 
-	/* Set up slow-path Unsolicited Receive Complete Queue */
-	if (!phba->sli4_hba.rxq_cq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0532 USOL RX CQ not allocated\n");
-		goto out_destroy_els_cq;
-	}
-	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
-			    LPFC_RCQ, LPFC_USOL);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0533 Failed setup of slow-path USOL RX CQ: "
-				"rc = 0x%x\n", rc);
-		goto out_destroy_els_cq;
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
-			phba->sli4_hba.rxq_cq->queue_id,
-			phba->sli4_hba.sp_eq->queue_id);
-
 	/* Set up fast-path FCP Response Complete Queue */
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +5476,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		goto out_destroy_fcp_wq;
 	}
 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
-			    phba->sli4_hba.rxq_cq, LPFC_USOL);
+			    phba->sli4_hba.els_cq, LPFC_USOL);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0541 Failed setup of Receive Queue: "
@@ -5519,7 +5488,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			"parent cq-id=%d\n",
 			phba->sli4_hba.hdr_rq->queue_id,
 			phba->sli4_hba.dat_rq->queue_id,
-			phba->sli4_hba.rxq_cq->queue_id);
+			phba->sli4_hba.els_cq->queue_id);
 	return 0;
 
 out_destroy_fcp_wq:
@@ -5531,8 +5500,6 @@ out_destroy_mbx_wq:
 out_destroy_fcp_cq:
 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5574,8 +5541,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-	/* Unset unsolicited receive complete queue */
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
 	/* Unset FCP response complete queue */
 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -3018,16 +3018,31 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 			       struct lpfc_sli_ring *pring, uint32_t mask)
 {
 	struct lpfc_iocbq *irspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
 	unsigned long iflag;
 
 	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
 		/* Get the response iocb from the head of work queue */
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
-				 irspiocbq, struct lpfc_iocbq, list);
+				 cq_event, struct lpfc_cq_event, list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		/* Process the response iocb */
-		lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
@@ -3416,6 +3431,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3476,6 +3492,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3495,7 +3512,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	list_del_init(&phba->sli4_hba.dat_rq->list);
 	list_del_init(&phba->sli4_hba.mbx_cq->list);
 	list_del_init(&phba->sli4_hba.els_cq->list);
-	list_del_init(&phba->sli4_hba.rxq_cq->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
 		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -4243,7 +4259,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 
 	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
-	lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
 		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
 				     LPFC_QUEUE_REARM);
@@ -8351,8 +8366,7 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 	       sizeof(struct lpfc_iocbq) - offset);
-	memset(&pIocbIn->sli4_info, 0,
-	       sizeof(struct lpfc_sli4_rspiocb_info));
+	pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe;
 	/* Map WCQE parameters into irspiocb parameters */
 	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8364,16 +8378,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
 	else
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
-	/* Load in additional WCQE parameters */
-	pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
-	pIocbIn->sli4_info.bfield = 0;
-	if (bf_get(lpfc_wcqe_c_xb, wcqe))
-		pIocbIn->sli4_info.bfield |= LPFC_XB;
-	if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
-		pIocbIn->sli4_info.bfield |= LPFC_PV;
-		pIocbIn->sli4_info.priority =
-			bf_get(lpfc_wcqe_c_priority, wcqe);
-	}
 }
 
 /**
@@ -8598,7 +8602,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
 
 	/* Add the irspiocb to the response IOCB work list */
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+	list_add_tail(&irspiocbq->cq_event.list,
+		      &phba->sli4_hba.sp_rspiocb_work_queue);
 	/* Indicate ELS ring attention */
 	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -8690,17 +8695,78 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  * @phba: Pointer to HBA context object.
- * @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
+ * @rcqe: Pointer to receive-queue completion queue entry.
  *
- * This routine process a slow-path work-queue completion queue entry.
+ * This routine process a receive-queue completion queue entry.
  *
 * Return: true if work posted to worker thread, otherwise false.
 **/
 static bool
-lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
+{
+	bool workposted = false;
+	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+	struct hbq_dmabuf *dma_buf;
+	uint32_t status;
+	unsigned long iflags;
+
+	lpfc_sli4_rq_release(hrq, drq);
+	if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE)
+		goto out;
+	if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
+		goto out;
+
+	status = bf_get(lpfc_rcqe_status, rcqe);
+	switch (status) {
+	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2537 Receive Frame Truncated!!\n");
+	case FC_STATUS_RQ_SUCCESS:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+		if (!dma_buf) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			goto out;
+		}
+		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
+		/* save off the frame for the word thread to process */
+		list_add_tail(&dma_buf->cq_event.list,
+			      &phba->sli4_hba.sp_rspiocb_work_queue);
+		/* Frame received */
+		phba->hba_flag |= HBA_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case FC_STATUS_INSUFF_BUF_NEED_BUF:
+	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		/* Post more buffers if possible */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	}
+out:
+	return workposted;
+
+}
+
+/**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @wcqe: Pointer to a completion queue entry.
+ *
+ * This routine process a slow-path work-queue or recieve queue completion queue
+ * entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			 struct lpfc_cqe *cqe)
 {
 	struct lpfc_wcqe_complete wcqe;
@@ -8726,6 +8792,11 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
 				(struct sli4_wcqe_xri_aborted *)&wcqe);
 		break;
+	case CQE_CODE_RECEIVE:
+		/* Process the RQ event */
+		workposted = lpfc_sli4_sp_handle_rcqe(phba,
+				(struct lpfc_rcqe *)&wcqe);
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0388 Not a valid WCQE code: x%x\n",
@@ -8735,68 +8806,6 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	return workposted;
 }
 
-/**
- * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
- * @phba: Pointer to HBA context object.
- * @rcqe: Pointer to receive-queue completion queue entry.
- *
- * This routine process a receive-queue completion queue entry.
- *
- * Return: true if work posted to worker thread, otherwise false.
- **/
-static bool
-lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
-{
-	struct lpfc_rcqe rcqe;
-	bool workposted = false;
-	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
-	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
-	struct hbq_dmabuf *dma_buf;
-	uint32_t status;
-	unsigned long iflags;
-
-	/* Copy the receive queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
-	lpfc_sli4_rq_release(hrq, drq);
-	if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
-		goto out;
-	if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
-		goto out;
-
-	status = bf_get(lpfc_rcqe_status, &rcqe);
-	switch (status) {
-	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2537 Receive Frame Truncated!!\n");
-	case FC_STATUS_RQ_SUCCESS:
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
-		if (!dma_buf) {
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
-			goto out;
-		}
-		memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
-		/* save off the frame for the word thread to process */
-		list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
-		/* Frame received */
-		phba->hba_flag |= HBA_RECEIVE_BUFFER;
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-		workposted = true;
-		break;
-	case FC_STATUS_INSUFF_BUF_NEED_BUF:
-	case FC_STATUS_INSUFF_BUF_FRM_DISC:
-		/* Post more buffers if possible */
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-		workposted = true;
-		break;
-	}
-out:
-	return workposted;
-
-}
-
 /**
  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
  * @phba: Pointer to HBA context object.
@@ -8858,14 +8867,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 		break;
 	case LPFC_WCQ:
 		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
-			if (!(++ecount % LPFC_GET_QE_REL_INT))
-				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
-		}
-		break;
-	case LPFC_RCQ:
-		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+			workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
 			if (!(++ecount % LPFC_GET_QE_REL_INT))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 		}
@@ -10823,6 +10825,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	struct hbq_dmabuf *seq_dmabuf = NULL;
 	struct hbq_dmabuf *temp_dmabuf = NULL;
 
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 	/* Use the hdr_buf to find the sequence that this frame belongs to */
 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10845,7 +10848,9 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	}
 	temp_hdr = seq_dmabuf->hbuf.virt;
 	if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
-		list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+		list_del_init(&seq_dmabuf->hbuf.list);
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
 		return dmabuf;
 	}
 	/* find the correct place in the sequence to insert this frame */
@@ -10957,7 +10962,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 				LPFC_DATA_BUF_SIZE;
 		first_iocbq->iocb.un.rcvels.remoteID = sid;
 		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-			bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+			bf_get(lpfc_rcqe_length,
+			       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 	}
 	iocbq = first_iocbq;
 	/*
@@ -10975,7 +10981,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
 				LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 		} else {
 			iocbq = lpfc_sli_get_iocbq(vport->phba);
 			if (!iocbq) {
@@ -10994,7 +11001,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 				LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 			iocbq->iocb.un.rcvels.remoteID = sid;
 			list_add_tail(&iocbq->list, &first_iocbq->list);
 		}
@@ -11014,11 +11022,11 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is received.
 **/
-int
-lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+				 struct hbq_dmabuf *dmabuf)
 {
-	LIST_HEAD(cmplq);
-	struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+	struct hbq_dmabuf *seq_dmabuf;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_vport *vport;
 	uint32_t fcfi;
@@ -11027,54 +11035,50 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
 	/* Clear hba flag and get all received buffers into the cmplq */
 	spin_lock_irq(&phba->hbalock);
 	phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
-	list_splice_init(&phba->rb_pend_list, &cmplq);
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Process each received buffer */
-	while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
-		fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
-		/* check to see if this a valid type of frame */
-		if (lpfc_fc_frame_check(phba, fc_hdr)) {
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
-		vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
-		if (!vport) {
-			/* throw out the frame */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* Link this frame */
-		seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
-		if (!seq_dmabuf) {
-			/* unable to add frame to vport - throw it out */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* If not last frame in sequence continue processing frames. */
-		if (!lpfc_seq_complete(seq_dmabuf)) {
-			/*
-			 * When saving off frames post a new one and mark this
-			 * frame to be freed when it is finished.
-			 **/
-			lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
-			dmabuf->tag = -1;
-			continue;
-		}
-		fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
-		iocbq = lpfc_prep_seq(vport, seq_dmabuf);
-		if (!lpfc_complete_unsol_iocb(phba,
-					      &phba->sli.ring[LPFC_ELS_RING],
-					      iocbq, fc_hdr->fh_r_ctl,
-					      fc_hdr->fh_type))
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"2540 Ring %d handler: unexpected Rctl "
-					"x%x Type x%x received\n",
-					LPFC_ELS_RING,
-					fc_hdr->fh_r_ctl, fc_hdr->fh_type);
-	};
-	return 0;
+	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* check to see if this a valid type of frame */
+	if (lpfc_fc_frame_check(phba, fc_hdr)) {
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+	if (!vport) {
+		/* throw out the frame */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* Link this frame */
+	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+	if (!seq_dmabuf) {
+		/* unable to add frame to vport - throw it out */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* If not last frame in sequence continue processing frames. */
+	if (!lpfc_seq_complete(seq_dmabuf)) {
+		/*
+		 * When saving off frames post a new one and mark this
+		 * frame to be freed when it is finished.
+		 **/
+		lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+		dmabuf->tag = -1;
+		return;
+	}
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+	if (!lpfc_complete_unsol_iocb(phba,
+				      &phba->sli.ring[LPFC_ELS_RING],
+				      iocbq, fc_hdr->fh_r_ctl,
+				      fc_hdr->fh_type))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2540 Ring %d handler: unexpected Rctl "
+				"x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
 }
 
 /**
@@ -11542,7 +11546,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2000 Failed to allocate mbox for "
 				"READ_FCF cmd\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	req_len = sizeof(struct fcf_record) +
@@ -11558,8 +11563,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 				"0291 Allocated DMA memory size (x%x) is "
 				"less than the requested DMA memory "
 				"size (x%x)\n", alloc_len, req_len);
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	/* Get the first SGE entry from the non-embedded DMA memory. This
@@ -11571,8 +11576,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"2527 Failed to get the non-embedded SGE "
 				"virtual address\n");
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 	virt_addr = mboxq->sge_array->addr[0];
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
@@ -11586,7 +11591,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		error = -EIO;
 	} else {
 		spin_lock_irq(&phba->hbalock);
@@ -11594,6 +11598,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		spin_unlock_irq(&phba->hbalock);
 		error = 0;
 	}
+fail_fcfscan:
+	if (error) {
+		if (mboxq)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		/* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+	}
 	return error;
 }
@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd {
 	LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
-/* This structure is used to carry the needed response IOCB states */
-struct lpfc_sli4_rspiocb_info {
-	uint8_t hw_status;
-	uint8_t bfield;
-#define LPFC_XB	0x1
-#define LPFC_PV	0x2
-	uint8_t priority;
-	uint8_t reserved;
+struct lpfc_cq_event {
+	struct list_head list;
+	union {
+		struct lpfc_mcqe		mcqe_cmpl;
+		struct lpfc_acqe_link		acqe_link;
+		struct lpfc_acqe_fcoe		acqe_fcoe;
+		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_rcqe		rcqe_cmpl;
+		struct sli4_wcqe_xri_aborted	wcqe_axri;
+		struct lpfc_wcqe_complete	wcqe_cmpl;
+	} cqe;
 };
 
 /* This structure is used to handle IOCB requests / responses */
@@ -76,7 +79,7 @@ struct lpfc_iocbq {
 			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
-	struct lpfc_sli4_rspiocb_info sli4_info;
+	struct lpfc_cq_event cq_event;
 };
 
 #define SLI_IOCB_RET_IOCB	1	/* Return IOCB if cmd ring full */
@@ -110,18 +110,6 @@ struct lpfc_queue {
 	union sli4_qe qe[1];	/* array to index entries (must be last) */
 };
 
-struct lpfc_cq_event {
-	struct list_head list;
-	union {
-		struct lpfc_mcqe		mcqe_cmpl;
-		struct lpfc_acqe_link		acqe_link;
-		struct lpfc_acqe_fcoe		acqe_fcoe;
-		struct lpfc_acqe_dcbx		acqe_dcbx;
-		struct lpfc_rcqe		rcqe_cmpl;
-		struct sli4_wcqe_xri_aborted	wcqe_axri;
-	} cqe;
-};
-
 struct lpfc_sli4_link {
 	uint8_t speed;
 	uint8_t duplex;
@@ -325,7 +313,6 @@ struct lpfc_sli4_hba {
 	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
 	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
 	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
-	struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
 
 	/* Setup information for various queue parameters */
 	int eq_esize;