SCSI misc on 20130915

This patch set is a set of driver updates (megaraid_sas, fnic, lpfc, ufs,
 hpsa). We also have a couple of bug fixes (sd out of bounds and ibmvfc
 error handling), the first round of esas2r checker fixes, and finally the
 much anticipated big-endian additions for megaraid_sas.
 
 Signed-off-by: James Bottomley <JBottomley@Parallels.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.19 (GNU/Linux)
 
 iQEcBAABAgAGBQJSNheiAAoJEDeqqVYsXL0MueMIAKD1kaB0oooRawE1+0vpKmyV
 eE2M6trA8ofTeq0z1eNfRsVMkRsUuG9exW0CKS2z6mHiWwQ/zGbqT7ukveW+dMi3
 mjKD0yO5ODk6bohWX/LiwZ6NGZSwC0dbIacXNy5ZsXKEizqwo1Jcc7qC/0AWn+o7
 WpIL48XLPH0HqjQZ3dvgC6TWeFZOn9cKOWvQQq0S3ENALOx/eLZ+C7VrJLx5Magv
 myNOUkTLzdlYglQfjaNO6et98k2oHTrzKwH7U2X6U75q7L8Pkj4RbNzce/Ge301V
 u+R1w+BlbeTPdHopTBoTJupsvqDYBZxVwS7rr8nhSvfKduQppHnN6jX8yR4XNeM=
 =RG3j
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull misc SCSI driver updates from James Bottomley:
 "This patch set is a set of driver updates (megaraid_sas, fnic, lpfc,
  ufs, hpsa) we also have a couple of bug fixes (sd out of bounds and
  ibmvfc error handling) and the first round of esas2r checker fixes and
  finally the much anticipated big endian additions for megaraid_sas"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
  [SCSI] fnic: fnic Driver Tuneables Exposed through CLI
  [SCSI] fnic: Kernel panic while running sh/nosh with max lun cfg
  [SCSI] fnic: Hitting BUG_ON(io_req->abts_done) in fnic_rport_exch_reset
  [SCSI] fnic: Remove QUEUE_FULL handling code
  [SCSI] fnic: On system with >1.1TB RAM, VIC fails multipath after boot up
  [SCSI] fnic: FC stat param seconds_since_last_reset not getting updated
  [SCSI] sd: Fix potential out-of-bounds access
  [SCSI] lpfc 8.3.42: Update lpfc version to driver version 8.3.42
  [SCSI] lpfc 8.3.42: Fixed issue of task management commands having a fixed timeout
  [SCSI] lpfc 8.3.42: Fixed inconsistent spin lock usage.
  [SCSI] lpfc 8.3.42: Fix driver's abort loop functionality to skip IOs already getting aborted
  [SCSI] lpfc 8.3.42: Fixed failure to allocate SCSI buffer on PPC64 platform for SLI4 devices
  [SCSI] lpfc 8.3.42: Fix WARN_ON when driver unloads
  [SCSI] lpfc 8.3.42: Avoided making pci bar ioremap call during dual-chute WQ/RQ pci bar selection
  [SCSI] lpfc 8.3.42: Fixed driver iocbq structure's iocb_flag field running out of space
  [SCSI] lpfc 8.3.42: Fix crash on driver load due to cpu affinity logic
  [SCSI] lpfc 8.3.42: Fixed logging format of setting driver sysfs attributes hard to interpret
  [SCSI] lpfc 8.3.42: Fixed back to back RSCNs discovery failure.
  [SCSI] lpfc 8.3.42: Fixed race condition between BSG I/O dispatch and timeout handling
  [SCSI] lpfc 8.3.42: Fixed function mode field defined too small for not recognizing dual-chute mode
  ...
Linus Torvalds 2013-09-15 17:41:30 -04:00
commit 0375ec5899
37 changed files with 1720 additions and 680 deletions

@@ -1,3 +1,13 @@
+Release Date    : Sat. Aug 31, 2013 17:00:00 PST 2013 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+Current Version : 06.700.06.00-rc1
+Old Version     : 06.600.18.00-rc1
+    1. Add High Availability clustering support using shared Logical Disks.
+    2. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Wed. May 15, 2013 17:00:00 PST 2013 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford

@@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
	 * ID as valid.
	 */
	if (ahc_get_pci_function(pci) > 0
-	 && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+	 && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
	 && SUBID_9005_MFUNCENB(subdevice) == 0)
		return (NULL);
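
This one-line aic7xxx fix swaps the call's argument order to match the declaration of ahc_9005_subdevinfo_valid(), which takes device before vendor and subdevice before subvendor. Because all four parameters share one integer type, the compiler cannot flag the swapped call. A toy sketch of the failure mode, with hypothetical names and IDs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical lookup: valid only for vendor 0x9005, device 0x8000. */
    static bool subdevinfo_valid(uint16_t device, uint16_t vendor)
    {
        return vendor == 0x9005 && device == 0x8000;
    }

    int main(void)
    {
        uint16_t vendor = 0x9005, device = 0x8000;

        /* Swapped arguments compile cleanly but never match ... */
        printf("swapped: %d\n", subdevinfo_valid(vendor, device)); /* 0 */
        /* ... while the declared order matches as intended. */
        printf("correct: %d\n", subdevinfo_valid(device, vendor)); /* 1 */
        return 0;
    }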

@@ -860,8 +860,13 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
 		return false;
 	}
 
+	if (fsc->command >= cmdcnt) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
 	func = cmd_to_fls_func[fsc->command];
-	if (fsc->command >= cmdcnt || func == 0xFF) {
+	if (func == 0xFF) {
 		fs->status = ATTO_STS_INV_FUNC;
 		return false;
 	}
@@ -1355,7 +1360,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
 	u32 time = jiffies_to_msecs(jiffies);
 
 	esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
-	memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	*n = default_sas_nvram;
 	n->sas_addr[3] |= 0x0F;
 	n->sas_addr[4] = HIBYTE(LOWORD(time));
 	n->sas_addr[5] = LOBYTE(LOWORD(time));
@@ -1373,7 +1378,7 @@ void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
 	 * address out first.
 	 */
 	memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
-	memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	*nvram = default_sas_nvram;
 	memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
 }
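
The first hunk above is typical of the esas2r checker fixes in this pull: the old code indexed cmd_to_fls_func[fsc->command] before validating fsc->command, so an out-of-range command read past the end of the table even though the combined check rejected it immediately afterwards. The rewrite validates the index first. A minimal standalone sketch of that pattern, with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static const unsigned char cmd_to_func[] = { 0x01, 0xFF, 0x02 };
    static const size_t cmdcnt = sizeof(cmd_to_func);

    static bool dispatch(size_t command)
    {
        if (command >= cmdcnt)          /* validate the index first ... */
            return false;

        unsigned char func = cmd_to_func[command];  /* ... then read */
        if (func == 0xFF)               /* 0xFF marks an unimplemented slot */
            return false;

        return true;
    }

    int main(void)
    {
        printf("%d %d %d\n", dispatch(0), dispatch(1), dispatch(99)); /* 1 0 0 */
        return 0;
    }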

@@ -665,7 +665,7 @@ void esas2r_kill_adapter(int i)
 
 int esas2r_cleanup(struct Scsi_Host *host)
 {
-	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+	struct esas2r_adapter *a;
 	int index;
 
 	if (host == NULL) {
@@ -678,6 +678,7 @@ int esas2r_cleanup(struct Scsi_Host *host)
 	}
 
 	esas2r_debug("esas2r_cleanup called for host %p", host);
+	a = (struct esas2r_adapter *)host->hostdata;
 	index = a->index;
 	esas2r_kill_adapter(index);
 	return index;
@@ -808,7 +809,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
 	int pcie_cap_reg;
 
 	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-	if (0xffff && pcie_cap_reg) {
+	if (0xffff & pcie_cap_reg) {
 		u16 devcontrol;
 
 		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -1550,8 +1551,7 @@ void esas2r_reset_chip(struct esas2r_adapter *a)
 	 * to not overwrite a previous crash that was saved.
 	 */
 	if ((a->flags2 & AF2_COREDUMP_AVAIL)
-	    && !(a->flags2 & AF2_COREDUMP_SAVED)
-	    && a->fw_coredump_buff) {
+	    && !(a->flags2 & AF2_COREDUMP_SAVED)) {
 		esas2r_read_mem_block(a,
 				      a->fw_coredump_buff,
 				      MW_DATA_ADDR_SRAM + 0x80000,
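
Two of these esas2r_init hunks fix ordering and operator slips: esas2r_cleanup() used to dereference host->hostdata in an initializer before the host == NULL check could run, and the PCIe-capability test used logical && where bitwise & was presumably intended, so "0xffff && pcie_cap_reg" merely tested pcie_cap_reg for non-zero. A small sketch of the operator difference (value hypothetical):

    #include <stdio.h>

    int main(void)
    {
        int cap_reg = 0x40;    /* hypothetical capability offset */

        /* Logical AND: any two nonzero operands yield 1. */
        printf("0xffff && cap_reg = %d\n", 0xffff && cap_reg);   /* 1 */
        /* Bitwise AND: masks the low 16 bits of the value. */
        printf("0xffff & cap_reg  = 0x%x\n", 0xffff & cap_reg);  /* 0x40 */
        return 0;
    }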

@@ -415,7 +415,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
 		lun = tm->lun;
 	}
 
-	if (path > 0 || tid > ESAS2R_MAX_ID) {
+	if (path > 0) {
 		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
 			CSMI_STS_INV_PARAM);
 		return false;

@@ -302,6 +302,7 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
 	if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
 		struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
 		struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+		char buf[sizeof(cfg->data.init.fw_release) + 1];
 
 		cfg->data_length =
 			cpu_to_le32(sizeof(struct atto_vda_cfg_init));
@@ -309,11 +310,13 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
 			le32_to_cpu(rsp->vda_version);
 		cfg->data.init.fw_build = rsp->fw_build;
 
-		sprintf((char *)&cfg->data.init.fw_release,
-			"%1d.%02d",
+		snprintf(buf, sizeof(buf), "%1d.%02d",
 			(int)LOBYTE(le16_to_cpu(rsp->fw_release)),
 			(int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+		memcpy(&cfg->data.init.fw_release, buf,
+		       sizeof(cfg->data.init.fw_release));
 
 		if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
 			cfg->data.init.fw_version =
 				cfg->data.init.fw_build;
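
The sprintf() removed here wrote a formatted version string, NUL terminator included, directly into the fixed-size fw_release field, so a maximal value could run one byte past the field; the replacement formats into a local buffer sized for the terminator with snprintf() and then copies exactly sizeof(fw_release) bytes. A standalone sketch of that pattern, assuming a hypothetical 8-byte field:

    #include <stdio.h>
    #include <string.h>

    struct fw_info {
        char fw_release[8];    /* fixed-width, not NUL-terminated */
    };

    static void set_release(struct fw_info *fi, int major, int minor)
    {
        char buf[sizeof(fi->fw_release) + 1];    /* room for the '\0' */

        /* snprintf() never writes past buf and always terminates it. */
        snprintf(buf, sizeof(buf), "%1d.%02d", major, minor);
        /* Copy only the field's width; the terminator stays behind. */
        memcpy(fi->fw_release, buf, sizeof(fi->fw_release));
    }

    int main(void)
    {
        struct fw_info fi;
        set_release(&fi, 1, 23);    /* fw_release begins "1.23" */
        return 0;
    }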

@@ -43,6 +43,8 @@
 #define DFX                     DRV_NAME "%d: "
 
 #define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD	16 /* UCSM default throttle count */
+#define FNIC_MIN_IO_REQ		256 /* Min IO throttle count */
 #define FNIC_MAX_IO_REQ		2048 /* scsi_cmnd tag map entries */
 #define FNIC_IO_LOCKS		64 /* IO locks: power of 2 */
 #define FNIC_DFLT_QUEUE_DEPTH	32
@@ -154,6 +156,9 @@ do {								\
 	FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING,			\
 			 shost_printk(kern_level, host, fmt, ##args);)
 
+#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...)          \
+	shost_printk(kern_level, host, fmt, ##args)
+
 extern const char *fnic_state_str[];
 
 enum fnic_intx_intr_index {
@@ -215,10 +220,12 @@ struct fnic {
 	struct vnic_stats *stats;
 	unsigned long stats_time;	/* time of stats update */
+	unsigned long stats_reset_time; /* time of stats reset */
 	struct vnic_nic_cfg *nic_cfg;
 	char name[IFNAMSIZ];
 	struct timer_list notify_timer; /* used for MSI interrupts */
 
+	unsigned int fnic_max_tag_id;
 	unsigned int err_intr_offset;
 	unsigned int link_intr_offset;
@@ -359,4 +366,5 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 	return ((fnic->state_flags & st_flags) == st_flags);
 }
 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
+void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
 #endif /* _FNIC_H_ */

@@ -74,6 +74,10 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
 					"for fnic trace buffer");
 
+static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
+module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
+
 static struct libfc_function_template fnic_transport_template = {
 	.frame_send = fnic_send,
 	.lport_set_port_id = fnic_set_port_id,
@@ -91,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
-	scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+	scsi_activate_tcq(sdev, fnic_max_qdepth);
 	return 0;
 }
 
@@ -126,6 +130,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 static void fnic_get_host_speed(struct Scsi_Host *shost);
 static struct scsi_transport_template *fnic_fc_transport;
 static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+static void fnic_reset_host_stats(struct Scsi_Host *);
 
 static struct fc_function_template fnic_fc_functions = {
@@ -153,6 +158,7 @@ static struct fc_function_template fnic_fc_functions = {
 	.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
 	.issue_fc_host_lip = fnic_reset,
 	.get_fc_host_stats = fnic_get_stats,
+	.reset_fc_host_stats = fnic_reset_host_stats,
 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
 	.terminate_rport_io = fnic_terminate_rport_io,
 	.bsg_request = fc_lport_bsg_request,
@@ -206,13 +212,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
 	stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
 	stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
 	stats->invalid_crc_count = vs->rx.rx_crc_errors;
-	stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+	stats->seconds_since_last_reset =
+			(jiffies - fnic->stats_reset_time) / HZ;
 	stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
 	stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
 
 	return stats;
 }
 
+/*
+ * fnic_dump_fchost_stats
+ * note : dumps fc_statistics into system logs
+ */
+void fnic_dump_fchost_stats(struct Scsi_Host *host,
+				struct fc_host_statistics *stats)
+{
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: seconds since last reset = %llu\n",
+			stats->seconds_since_last_reset);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: tx frames = %llu\n",
+			stats->tx_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: tx words = %llu\n",
+			stats->tx_words);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: rx frames = %llu\n",
+			stats->rx_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: rx words = %llu\n",
+			stats->rx_words);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: lip count = %llu\n",
+			stats->lip_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: nos count = %llu\n",
+			stats->nos_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: error frames = %llu\n",
+			stats->error_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: dumped frames = %llu\n",
+			stats->dumped_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: link failure count = %llu\n",
+			stats->link_failure_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: loss of sync count = %llu\n",
+			stats->loss_of_sync_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: loss of signal count = %llu\n",
+			stats->loss_of_signal_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: prim seq protocol err count = %llu\n",
+			stats->prim_seq_protocol_err_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: invalid tx word count= %llu\n",
+			stats->invalid_tx_word_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: invalid crc count = %llu\n",
+			stats->invalid_crc_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp input requests = %llu\n",
+			stats->fcp_input_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp output requests = %llu\n",
+			stats->fcp_output_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp control requests = %llu\n",
+			stats->fcp_control_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp input megabytes = %llu\n",
+			stats->fcp_input_megabytes);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp output megabytes = %llu\n",
+			stats->fcp_output_megabytes);
+	return;
+}
+
+/*
+ * fnic_reset_host_stats : clears host stats
+ * note : called when reset_statistics set under sysfs dir
+ */
+static void fnic_reset_host_stats(struct Scsi_Host *host)
+{
+	int ret;
+	struct fc_lport *lp = shost_priv(host);
+	struct fnic *fnic = lport_priv(lp);
+	struct fc_host_statistics *stats;
+	unsigned long flags;
+
+	/* dump current stats, before clearing them */
+	stats = fnic_get_stats(host);
+	fnic_dump_fchost_stats(host, stats);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	ret = vnic_dev_stats_clear(fnic->vdev);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (ret) {
+		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+				"fnic: Reset vnic stats failed"
+				" 0x%x", ret);
+		return;
+	}
+	fnic->stats_reset_time = jiffies;
+	memset(stats, 0, sizeof(*stats));
+
+	return;
+}
+
 void fnic_log_q_error(struct fnic *fnic)
 {
 	unsigned int i;
@@ -447,13 +556,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	host->transportt = fnic_fc_transport;
 
-	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
-	if (err) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-			     "Unable to alloc shared tag map\n");
-		goto err_out_free_hba;
-	}
-
 	/* Setup PCI resources */
 	pci_set_drvdata(pdev, fnic);
@@ -476,10 +578,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device. Try 40-bit first, and
+	 * limitation for the device. Try 64-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -496,10 +598,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
-				     "Unable to obtain 40-bit DMA "
+				     "Unable to obtain 64-bit DMA "
 				     "for consistent allocations, aborting.\n");
 			goto err_out_release_regions;
 		}
@@ -566,6 +668,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			      "aborting.\n");
 		goto err_out_dev_close;
 	}
+
+	/* Configure Maximum Outstanding IO reqs*/
+	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
+		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+					max_t(u32, FNIC_MIN_IO_REQ,
+					fnic->config.io_throttle_count));
+	}
+	fnic->fnic_max_tag_id = host->can_queue;
+
+	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			  "Unable to alloc shared tag map\n");
+		goto err_out_dev_close;
+	}
+
 	host->max_lun = fnic->config.luns_per_tgt;
 	host->max_id = FNIC_MAX_FCP_TARGET;
 	host->max_cmd_len = FCOE_MAX_CMD_LEN;
@@ -719,6 +837,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	fc_lport_init_stats(lp);
+	fnic->stats_reset_time = jiffies;
 
 	fc_lport_config(lp);
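
The probe changes derive host->can_queue from the UCSM-configured io_throttle_count, clamped into [FNIC_MIN_IO_REQ, FNIC_MAX_IO_REQ] (a count equal to the UCSM build default of 16 leaves the existing can_queue untouched), and only then size the shared tag map, which is why the scsi_init_shared_tag_map() call moved after the firmware configuration is read. A user-space sketch of the same clamp arithmetic that the nested min_t()/max_t() performs:

    #include <stdio.h>

    #define FNIC_MIN_IO_REQ  256
    #define FNIC_MAX_IO_REQ 2048

    /* Clamp a configured throttle count into [min, max], as the
     * min_t(u32, ..., max_t(u32, ...)) nesting in the hunk does. */
    static unsigned int clamp_throttle(unsigned int io_throttle_count)
    {
        unsigned int lo = io_throttle_count > FNIC_MIN_IO_REQ ?
                          io_throttle_count : FNIC_MIN_IO_REQ;
        return lo < FNIC_MAX_IO_REQ ? lo : FNIC_MAX_IO_REQ;
    }

    int main(void)
    {
        printf("%u %u %u\n", clamp_throttle(32),    /* -> 256  */
                             clamp_throttle(1024),  /* -> 1024 */
                             clamp_throttle(8192)); /* -> 2048 */
        return 0;
    }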

@@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
 	return &fnic->io_req_lock[hash];
 }
 
+static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
+					    int tag)
+{
+	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
+}
+
 /*
  * Unmap the data buffer and sense buffer for an io_req,
  * also unmap and free the device-private scatter/gather list.
@@ -730,7 +736,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	fcpio_tag_id_dec(&tag, &id);
 	icmnd_cmpl = &desc->u.icmnd_cmpl;
 
-	if (id >= FNIC_MAX_IO_REQ) {
+	if (id >= fnic->fnic_max_tag_id) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			"Tag out of range tag %x hdr status = %s\n",
 			id, fnic_fcpio_status_to_str(hdr_status));
@@ -818,38 +824,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
 			xfer_len -= icmnd_cmpl->residual;
 
-		/*
-		 * If queue_full, then try to reduce queue depth for all
-		 * LUNS on the target. Todo: this should be accompanied
-		 * by a periodic queue_depth rampup based on successful
-		 * IO completion.
-		 */
-		if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
-			struct scsi_device *t_sdev;
-			int qd = 0;
-
-			shost_for_each_device(t_sdev, sc->device->host) {
-				if (t_sdev->id != sc->device->id)
-					continue;
-
-				if (t_sdev->queue_depth > 1) {
-					qd = scsi_track_queue_full
-						(t_sdev,
-						 t_sdev->queue_depth - 1);
-					if (qd == -1)
-						qd = t_sdev->host->cmd_per_lun;
-					shost_printk(KERN_INFO,
-						     fnic->lport->host,
-						     "scsi[%d:%d:%d:%d"
-						     "] queue full detected,"
-						     "new depth = %d\n",
-						     t_sdev->host->host_no,
-						     t_sdev->channel,
-						     t_sdev->id, t_sdev->lun,
-						     t_sdev->queue_depth);
-				}
-			}
-		}
-
 		break;
 
 	case FCPIO_TIMEOUT: /* request was timed out */
@@ -939,7 +913,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 	fcpio_tag_id_dec(&tag, &id);
 
-	if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 		"Tag out of range tag %x hdr status = %s\n",
 		id, fnic_fcpio_status_to_str(hdr_status));
@@ -988,9 +962,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 			spin_unlock_irqrestore(io_lock, flags);
 			return;
 		}
-		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 		CMD_ABTS_STATUS(sc) = hdr_status;
-
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "abts cmpl recd. id %d status %s\n",
@@ -1148,23 +1120,25 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
 
 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 {
-	unsigned int i;
+	int i;
 	struct fnic_io_req *io_req;
 	unsigned long flags = 0;
 	struct scsi_cmnd *sc;
 	spinlock_t *io_lock;
 	unsigned long start_time = 0;
 
-	for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
 		if (i == exclude_id)
 			continue;
 
-		sc = scsi_host_find_tag(fnic->lport->host, i);
-		if (!sc)
-			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
+		io_lock = fnic_io_lock_tag(fnic, i);
 		spin_lock_irqsave(io_lock, flags);
+		sc = scsi_host_find_tag(fnic->lport->host, i);
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
+
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
 		    !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
@@ -1236,7 +1210,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 	fcpio_tag_id_dec(&desc->hdr.tag, &id);
 	id &= FNIC_TAG_MASK;
 
-	if (id >= FNIC_MAX_IO_REQ)
+	if (id >= fnic->fnic_max_tag_id)
 		return;
 
 	sc = scsi_host_find_tag(fnic->lport->host, id);
@@ -1340,14 +1314,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 	if (fnic->in_remove)
 		return;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
-		sc = scsi_host_find_tag(fnic->lport->host, tag);
-		if (!sc)
-			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
+		io_lock = fnic_io_lock_tag(fnic, tag);
 		spin_lock_irqsave(io_lock, flags);
+		sc = scsi_host_find_tag(fnic->lport->host, tag);
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
 
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1441,12 +1416,29 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 	unsigned long flags;
 	struct scsi_cmnd *sc;
 	struct scsi_lun fc_lun;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
-	struct fnic *fnic = lport_priv(lport);
+	struct fc_rport_libfc_priv *rdata;
+	struct fc_lport *lport;
+	struct fnic *fnic;
 	struct fc_rport *cmd_rport;
 	enum fnic_ioreq_state old_ioreq_state;
 
+	if (!rport) {
+		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+		return;
+	}
+	rdata = rport->dd_data;
+
+	if (!rdata) {
+		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
+		return;
+	}
+	lport = rdata->local_port;
+
+	if (!lport) {
+		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
+		return;
+	}
+	fnic = lport_priv(lport);
+
 	FNIC_SCSI_DBG(KERN_DEBUG,
 		      fnic->lport->host, "fnic_terminate_rport_io called"
 		      " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
@@ -1456,18 +1448,21 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 	if (fnic->in_remove)
 		return;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
+
+		io_lock = fnic_io_lock_tag(fnic, tag);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
-		if (!sc)
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
+		}
 
 		cmd_rport = starget_to_rport(scsi_target(sc->device));
-		if (rport != cmd_rport)
+		if (rport != cmd_rport) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
+		}
 
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1680,13 +1675,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	io_req->abts_done = NULL;
 
 	/* fw did not complete abort, timed out */
-	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 		spin_unlock_irqrestore(io_lock, flags);
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
 
+	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+
 	/*
 	 * firmware completed the abort, check the status,
 	 * free the io_req irrespective of failure or success
@@ -1784,17 +1781,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 	enum fnic_ioreq_state old_ioreq_state;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+		io_lock = fnic_io_lock_tag(fnic, tag);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
 		/*
 		 * ignore this lun reset cmd or cmds that do not belong to
 		 * this lun
 		 */
-		if (!sc || sc == lr_sc || sc->device != lun_dev)
+		if (!sc || sc == lr_sc || sc->device != lun_dev) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
+		}
 
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1823,6 +1821,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 			spin_unlock_irqrestore(io_lock, flags);
 			continue;
 		}
+
+		if (io_req->abts_done)
+			shost_printk(KERN_ERR, fnic->lport->host,
+			  "%s: io_req->abts_done is set state is %s\n",
+			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
 		old_ioreq_state = CMD_STATE(sc);
 		/*
 		 * Any pending IO issued prior to reset is expected to be
@@ -1833,11 +1836,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 		 */
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 
-		if (io_req->abts_done)
-			shost_printk(KERN_ERR, fnic->lport->host,
-			  "%s: io_req->abts_done is set state is %s\n",
-			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
-
 		BUG_ON(io_req->abts_done);
 
 		abt_tag = tag;
@@ -1890,12 +1888,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 		io_req->abts_done = NULL;
 
 		/* if abort is still pending with fw, fail */
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 			spin_unlock_irqrestore(io_lock, flags);
 			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
 			ret = 1;
 			goto clean_pending_aborts_end;
 		}
+		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 		CMD_SP(sc) = NULL;
 		spin_unlock_irqrestore(io_lock, flags);
@@ -2093,8 +2092,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		spin_unlock_irqrestore(io_lock, flags);
 		int_to_scsilun(sc->device->lun, &fc_lun);
 		/*
-		 * Issue abort and terminate on the device reset request.
-		 * If q'ing of the abort fails, retry issue it after a delay.
+		 * Issue abort and terminate on device reset request.
+		 * If q'ing of terminate fails, retry it after a delay.
 		 */
 		while (1) {
 			spin_lock_irqsave(io_lock, flags);
@@ -2405,7 +2404,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
 		lun_dev = lr_sc->device;
 
 	/* walk again to check, if IOs are still pending in fw */
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
 		/*
 		 * ignore this lun reset cmd or cmds that do not belong to
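
A single pattern runs through these fnic_scsi.c hunks: loops over the tag space used to call scsi_host_find_tag() first and take the per-tag IO lock afterwards, leaving a window in which a completing command could be freed between lookup and lock; that window is behind the BUG_ON(io_req->abts_done) crash named in the shortlog. The new fnic_io_lock_tag() helper hashes the tag number itself, so each loop can lock before looking up. A sketch of that tag-to-lock mapping (sizes as in the driver, the rest hypothetical):

    #include <stdio.h>

    #define IO_LOCKS 64    /* power of two, as FNIC_IO_LOCKS is */

    struct lock { int dummy; };
    static struct lock io_req_lock[IO_LOCKS];

    /* Old scheme hashed the command pointer, so the command had to be
     * looked up first; hashing the tag lets callers lock beforehand. */
    static struct lock *io_lock_tag(int tag)
    {
        return &io_req_lock[tag & (IO_LOCKS - 1)];
    }

    int main(void)
    {
        /* Tags 3 and 67 share a lock: 67 & 63 == 3. */
        printf("%d\n", io_lock_tag(3) == io_lock_tag(67));    /* 1 */
        return 0;
    }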

@@ -54,8 +54,8 @@
 #define VNIC_FNIC_PLOGI_TIMEOUT_MIN         1000
 #define VNIC_FNIC_PLOGI_TIMEOUT_MAX         255000
 
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN     256
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX     4096
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN     1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX     2048
 
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN     0
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX     240000

@@ -54,7 +54,7 @@
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.2-1"
+#define HPSA_DRIVER_VERSION "3.4.0-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -89,13 +89,14 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
@@ -107,7 +108,19 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
 	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -125,24 +138,35 @@ static struct board_type products[] = {
 	{0x3245103C, "Smart Array P410i", &SA5_access},
 	{0x3247103C, "Smart Array P411", &SA5_access},
 	{0x3249103C, "Smart Array P812", &SA5_access},
-	{0x324a103C, "Smart Array P712m", &SA5_access},
-	{0x324b103C, "Smart Array P711m", &SA5_access},
+	{0x324A103C, "Smart Array P712m", &SA5_access},
+	{0x324B103C, "Smart Array P711m", &SA5_access},
 	{0x3350103C, "Smart Array P222", &SA5_access},
 	{0x3351103C, "Smart Array P420", &SA5_access},
 	{0x3352103C, "Smart Array P421", &SA5_access},
 	{0x3353103C, "Smart Array P822", &SA5_access},
+	{0x334D103C, "Smart Array P822se", &SA5_access},
 	{0x3354103C, "Smart Array P420i", &SA5_access},
 	{0x3355103C, "Smart Array P220i", &SA5_access},
 	{0x3356103C, "Smart Array P721m", &SA5_access},
-	{0x1920103C, "Smart Array", &SA5_access},
-	{0x1921103C, "Smart Array", &SA5_access},
-	{0x1922103C, "Smart Array", &SA5_access},
-	{0x1923103C, "Smart Array", &SA5_access},
-	{0x1924103C, "Smart Array", &SA5_access},
-	{0x1925103C, "Smart Array", &SA5_access},
-	{0x1926103C, "Smart Array", &SA5_access},
-	{0x1928103C, "Smart Array", &SA5_access},
-	{0x334d103C, "Smart Array P822se", &SA5_access},
+	{0x1921103C, "Smart Array P830i", &SA5_access},
+	{0x1922103C, "Smart Array P430", &SA5_access},
+	{0x1923103C, "Smart Array P431", &SA5_access},
+	{0x1924103C, "Smart Array P830", &SA5_access},
+	{0x1926103C, "Smart Array P731m", &SA5_access},
+	{0x1928103C, "Smart Array P230i", &SA5_access},
+	{0x1929103C, "Smart Array P530", &SA5_access},
+	{0x21BD103C, "Smart Array", &SA5_access},
+	{0x21BE103C, "Smart Array", &SA5_access},
+	{0x21BF103C, "Smart Array", &SA5_access},
+	{0x21C0103C, "Smart Array", &SA5_access},
+	{0x21C1103C, "Smart Array", &SA5_access},
+	{0x21C2103C, "Smart Array", &SA5_access},
+	{0x21C3103C, "Smart Array", &SA5_access},
+	{0x21C4103C, "Smart Array", &SA5_access},
+	{0x21C5103C, "Smart Array", &SA5_access},
+	{0x21C7103C, "Smart Array", &SA5_access},
+	{0x21C8103C, "Smart Array", &SA5_access},
+	{0x21C9103C, "Smart Array", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
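
The last real entry in the device table is a catch-all: instead of naming a subsystem ID, it matches any HP device whose 24-bit PCI class code is RAID storage, comparing under a mask that ignores the low programming-interface byte. A sketch of that masked class comparison:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_CLASS_STORAGE_RAID 0x0104    /* base 0x01, subclass 0x04 */

    /* Match a device's 24-bit class code (base/sub/prog-if) against a
     * masked table entry, as the catch-all hpsa entry does. */
    static int class_matches(uint32_t dev_class)
    {
        uint32_t wanted = PCI_CLASS_STORAGE_RAID << 8;    /* 0x010400 */
        uint32_t mask = 0xffff << 8;                      /* ignore prog-if */
        return (dev_class & mask) == wanted;
    }

    int main(void)
    {
        printf("%d %d\n", class_matches(0x010400),   /* RAID: 1 */
                          class_matches(0x010600));  /* SATA: 0 */
        return 0;
    }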

@@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 
 	if (rsp_rc != 0) {
 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
-		return -EIO;
+		/* If failure is received, the host adapter is most likely going
+		 through reset, return success so the caller will wait for the command
+		 being cancelled to get returned */
+		return 0;
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 
 	if (status != IBMVFC_MAD_SUCCESS) {
 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
-		return -EIO;
+		switch (status) {
+		case IBMVFC_MAD_DRIVER_FAILED:
+		case IBMVFC_MAD_CRQ_ERROR:
+			/* Host adapter most likely going through reset, return success to
+			 the caller will wait for the command being cancelled to get returned */
+			return 0;
+		default:
+			return -EIO;
+		};
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");

@@ -241,7 +241,7 @@ static void gather_partition_info(void)
 	struct device_node *rootdn;
 
 	const char *ppartition_name;
-	const unsigned int *p_number_ptr;
+	const __be32 *p_number_ptr;
 
 	/* Retrieve information about this partition */
 	rootdn = of_find_node_by_path("/");
@@ -255,7 +255,7 @@ static void gather_partition_info(void)
 			sizeof(partition_name));
 	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
 	if (p_number_ptr)
-		partition_number = *p_number_ptr;
+		partition_number = of_read_number(p_number_ptr, 1);
 	of_node_put(rootdn);
 }
 
@@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
 	strncpy(hostdata->madapter_info.partition_name, partition_name,
 			sizeof(hostdata->madapter_info.partition_name));
 
-	hostdata->madapter_info.partition_number = partition_number;
+	hostdata->madapter_info.partition_number =
+					cpu_to_be32(partition_number);
 
-	hostdata->madapter_info.mad_version = 1;
-	hostdata->madapter_info.os_type = 2;
+	hostdata->madapter_info.mad_version = cpu_to_be32(1);
+	hostdata->madapter_info.os_type = cpu_to_be32(2);
 }
 
 /**
@@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool,
 		memset(&evt->crq, 0x00, sizeof(evt->crq));
 		atomic_set(&evt->free, 1);
 		evt->crq.valid = 0x80;
-		evt->crq.IU_length = sizeof(*evt->xfer_iu);
-		evt->crq.IU_data_ptr = pool->iu_token +
-			sizeof(*evt->xfer_iu) * i;
+		evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+			sizeof(*evt->xfer_iu) * i);
 		evt->xfer_iu = pool->iu_storage + i;
 		evt->hostdata = hostdata;
 		evt->ext_list = NULL;
@@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
 	evt_struct->cmnd_done = NULL;
 	evt_struct->sync_srp = NULL;
 	evt_struct->crq.format = format;
-	evt_struct->crq.timeout = timeout;
+	evt_struct->crq.timeout = cpu_to_be16(timeout);
 	evt_struct->done = done;
 }
 
@@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
 	scsi_for_each_sg(cmd, sg, nseg, i) {
 		struct srp_direct_buf *descr = md + i;
 
-		descr->va = sg_dma_address(sg);
-		descr->len = sg_dma_len(sg);
+		descr->va = cpu_to_be64(sg_dma_address(sg));
+		descr->len = cpu_to_be32(sg_dma_len(sg));
 		descr->key = 0;
 		total_length += sg_dma_len(sg);
 	}
@@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 	}
 
 	indirect->table_desc.va = 0;
-	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+	indirect->table_desc.len = cpu_to_be32(sg_mapped *
+					       sizeof(struct srp_direct_buf));
 	indirect->table_desc.key = 0;
 
 	if (sg_mapped <= MAX_INDIRECT_BUFS) {
 		total_length = map_sg_list(cmd, sg_mapped,
 					   &indirect->desc_list[0]);
-		indirect->len = total_length;
+		indirect->len = cpu_to_be32(total_length);
 		return 1;
 	}
 
@@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 
-	indirect->len = total_length;
-	indirect->table_desc.va = evt_struct->ext_list_token;
-	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+	indirect->len = cpu_to_be32(total_length);
+	indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+	indirect->table_desc.len = cpu_to_be32(sg_mapped *
+					       sizeof(indirect->desc_list[0]));
 	memcpy(indirect->desc_list, evt_struct->ext_list,
 	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
 	return 1;
@@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 				   struct ibmvscsi_host_data *hostdata,
 				   unsigned long timeout)
 {
-	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
 	int request_status = 0;
 	int rc;
 	int srp_req = 0;
@@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 		add_timer(&evt_struct->timer);
 	}
 
-	if ((rc =
-	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+	rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+			       be64_to_cpu(crq_as_u64[1]));
+	if (rc != 0) {
 		list_del(&evt_struct->list);
 		del_timer(&evt_struct->timer);
 
@@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
 			memcpy(cmnd->sense_buffer,
 			       rsp->data,
-			       rsp->sense_data_len);
+			       be32_to_cpu(rsp->sense_data_len));
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
 			       evt_struct,
 			       evt_struct->hostdata->dev);
 
 		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
-			scsi_set_resid(cmnd, rsp->data_out_res_cnt);
+			scsi_set_resid(cmnd,
+				       be32_to_cpu(rsp->data_out_res_cnt));
 		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
-			scsi_set_resid(cmnd, rsp->data_in_res_cnt);
+			scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
 	}
 
 	if (evt_struct->cmnd_done)
@@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
 	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
 	srp_cmd->opcode = SRP_CMD;
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
-	srp_cmd->lun = ((u64) lun) << 48;
+	srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
 
 	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
 	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
 	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
 	    indirect->table_desc.va == 0) {
-		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+		indirect->table_desc.va =
+			cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
 			offsetof(struct srp_cmd, add_data) +
-			offsetof(struct srp_indirect_buf, desc_list);
+			offsetof(struct srp_indirect_buf, desc_list));
 	}
 
 	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
@@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	 * request_limit could have been set to -1 by this client.
 	 */
 	atomic_set(&hostdata->request_limit,
-		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
+		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
 
 	/* If we had any pending I/Os, kick them */
 	scsi_unblock_requests(hostdata->host);
@@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	login = &evt_struct->iu.srp.login_req;
 	memset(login, 0, sizeof(*login));
 	login->opcode = SRP_LOGIN_REQ;
-	login->req_it_iu_len = sizeof(union srp_iu);
-	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+	login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+					 SRP_BUF_FORMAT_INDIRECT);
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 0, since this is negotiated in
@@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct)
 		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
 			evt_struct->xfer_iu->mad.capabilities.common.status);
 	} else {
-		if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+		if (hostdata->caps.migration.common.server_support !=
+		    cpu_to_be16(SERVER_SUPPORTS_CAP))
 			dev_info(hostdata->dev, "Partition migration not supported\n");
 
 		if (client_reserve) {
 			if (hostdata->caps.reserve.common.server_support ==
-			    SERVER_SUPPORTS_CAP)
+			    cpu_to_be16(SERVER_SUPPORTS_CAP))
 				dev_info(hostdata->dev, "Client reserve enabled\n");
 			else
 				dev_info(hostdata->dev, "Client reserve not supported\n");
@@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
 	req = &evt_struct->iu.mad.capabilities;
 	memset(req, 0, sizeof(*req));
 
-	hostdata->caps.flags = CAP_LIST_SUPPORTED;
+	hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
 	if (hostdata->client_migrated)
-		hostdata->caps.flags |= CLIENT_MIGRATED;
+		hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
 
 	strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
 		sizeof(hostdata->caps.name));
@@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
 	strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
 	hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
 
-	req->common.type = VIOSRP_CAPABILITIES_TYPE;
-	req->buffer = hostdata->caps_addr;
+	req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
+	req->buffer = cpu_to_be64(hostdata->caps_addr);
 
-	hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
-	hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
-	hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
-	hostdata->caps.migration.ecl = 1;
+	hostdata->caps.migration.common.cap_type =
+				cpu_to_be32(MIGRATION_CAPABILITIES);
+	hostdata->caps.migration.common.length =
+				cpu_to_be16(sizeof(hostdata->caps.migration));
+	hostdata->caps.migration.common.server_support =
+				cpu_to_be16(SERVER_SUPPORTS_CAP);
+	hostdata->caps.migration.ecl = cpu_to_be32(1);
 
 	if (client_reserve) {
-		hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
-		hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
-		hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
-		hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
-		req->common.length = sizeof(hostdata->caps);
+		hostdata->caps.reserve.common.cap_type =
+					cpu_to_be32(RESERVATION_CAPABILITIES);
+		hostdata->caps.reserve.common.length =
+				cpu_to_be16(sizeof(hostdata->caps.reserve));
+		hostdata->caps.reserve.common.server_support =
+				cpu_to_be16(SERVER_SUPPORTS_CAP);
+		hostdata->caps.reserve.type =
+				cpu_to_be32(CLIENT_RESERVE_SCSI_2);
+		req->common.length =
+				cpu_to_be16(sizeof(hostdata->caps));
 	} else
-		req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+		req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
+						sizeof(hostdata->caps.reserve));
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
 static void fast_fail_rsp(struct srp_event_struct *evt_struct)
 {
 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-	u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+	u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
 
 	if (status == VIOSRP_MAD_NOT_SUPPORTED)
 		dev_err(hostdata->dev, "fast_fail not supported in server\n");
@@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
 
 	fast_fail_mad = &evt_struct->iu.mad.fast_fail;
 	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
-	fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
-	fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+	fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
+	fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
@@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 			"host partition %s (%d), OS %d, max io %u\n",
 			hostdata->madapter_info.srp_version,
 			hostdata->madapter_info.partition_name,
-			hostdata->madapter_info.partition_number,
-			hostdata->madapter_info.os_type,
-			hostdata->madapter_info.port_max_txu[0]);
+			be32_to_cpu(hostdata->madapter_info.partition_number),
+			be32_to_cpu(hostdata->madapter_info.os_type),
+			be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
 
 		if (hostdata->madapter_info.port_max_txu[0])
 			hostdata->host->max_sectors =
-				hostdata->madapter_info.port_max_txu[0] >> 9;
+				be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
 
-		if (hostdata->madapter_info.os_type == 3 &&
+		if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
 		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
 			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
 				hostdata->madapter_info.srp_version);
@@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
 		}
 
-		if (hostdata->madapter_info.os_type == 3) {
+		if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
 			enable_fast_fail(hostdata);
 			return;
 		}
@@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 	req = &evt_struct->iu.mad.adapter_info;
 	memset(req, 0x00, sizeof(*req));
 
-	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
-	req->common.length = sizeof(hostdata->madapter_info);
-	req->buffer = hostdata->adapter_info_addr;
+	req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
+	req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
+	req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	/* Set up an abort SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
 	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = ((u64) lun) << 48;
+	tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
 	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
 	tsk_mgmt->task_tag = (u64) found_evt;
 
@@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	/* Set up a lun reset SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
 	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = ((u64) lun) << 48;
+	tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
 	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
 	evt->sync_srp = &srp_rsp;
@@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 {
 	long rc;
 	unsigned long flags;
+	/* The hypervisor copies our tag value here so no byteswapping */
 	struct srp_event_struct *evt_struct =
-	    (struct srp_event_struct *)crq->IU_data_ptr;
+			(__force struct srp_event_struct *)crq->IU_data_ptr;
 	switch (crq->valid) {
 	case 0xC0:		/* initialization */
 		switch (crq->format) {
@@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	 */
 	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
 		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
-			(void *)crq->IU_data_ptr);
+			evt_struct);
 		return;
 	}
 
 	if (atomic_read(&evt_struct->free)) {
 		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
-			(void *)crq->IU_data_ptr);
+			evt_struct);
 		return;
 	}
 
 	if (crq->format == VIOSRP_SRP_FORMAT)
-		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
+		atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
 			   &hostdata->request_limit);
 
 	del_timer(&evt_struct->timer);
@@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
 	/* Set up a lun reset SRP command */
 	memset(host_config, 0x00, sizeof(*host_config));
-	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
-	host_config->common.length = length;
-	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
-						    length,
-						    DMA_BIDIRECTIONAL);
+	host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
+	host_config->common.length = cpu_to_be16(length);
+	addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
 
-	if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+	if (dma_mapping_error(hostdata->dev, addr)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
 			dev_err(hostdata->dev,
 				"dma_mapping error getting host config\n");
@ -1870,6 +1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
return -1; return -1;
} }
host_config->buffer = cpu_to_be64(addr);
init_completion(&evt_struct->comp); init_completion(&evt_struct->comp);
spin_lock_irqsave(hostdata->host->host_lock, flags); spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
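The reordered hunk above is worth pausing on: the DMA handle is now checked with dma_mapping_error() before it is byte-swapped into the request, so a failed mapping is never published to the firmware. A minimal sketch of that map/check/publish order (the dev, buf, len, and req names are illustrative placeholders, not the driver's):

    dma_addr_t addr;

    addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, addr))
            return -1;      /* nothing was published, nothing to unmap */

    /* only a handle known to be good is swapped into wire order */
    req->buffer = cpu_to_be64(addr);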


@ -75,9 +75,9 @@ struct viosrp_crq {
u8 format; /* SCSI vs out-of-band */ u8 format; /* SCSI vs out-of-band */
u8 reserved; u8 reserved;
u8 status; /* non-scsi failure? (e.g. DMA failure) */ u8 status; /* non-scsi failure? (e.g. DMA failure) */
u16 timeout; /* in seconds */ __be16 timeout; /* in seconds */
u16 IU_length; /* in bytes */ __be16 IU_length; /* in bytes */
u64 IU_data_ptr; /* the TCE for transferring data */ __be64 IU_data_ptr; /* the TCE for transferring data */
}; };
/* MADs are Management requests above and beyond the IUs defined in the SRP /* MADs are Management requests above and beyond the IUs defined in the SRP
@ -124,10 +124,10 @@ enum viosrp_capability_flag {
* Common MAD header * Common MAD header
*/ */
struct mad_common { struct mad_common {
u32 type; __be32 type;
u16 status; __be16 status;
u16 length; __be16 length;
u64 tag; __be64 tag;
}; };
/* /*
@ -139,23 +139,23 @@ struct mad_common {
*/ */
struct viosrp_empty_iu { struct viosrp_empty_iu {
struct mad_common common; struct mad_common common;
u64 buffer; __be64 buffer;
u32 port; __be32 port;
}; };
struct viosrp_error_log { struct viosrp_error_log {
struct mad_common common; struct mad_common common;
u64 buffer; __be64 buffer;
}; };
struct viosrp_adapter_info { struct viosrp_adapter_info {
struct mad_common common; struct mad_common common;
u64 buffer; __be64 buffer;
}; };
struct viosrp_host_config { struct viosrp_host_config {
struct mad_common common; struct mad_common common;
u64 buffer; __be64 buffer;
}; };
struct viosrp_fast_fail { struct viosrp_fast_fail {
@ -164,27 +164,27 @@ struct viosrp_fast_fail {
struct viosrp_capabilities { struct viosrp_capabilities {
struct mad_common common; struct mad_common common;
u64 buffer; __be64 buffer;
}; };
struct mad_capability_common { struct mad_capability_common {
u32 cap_type; __be32 cap_type;
u16 length; __be16 length;
u16 server_support; __be16 server_support;
}; };
struct mad_reserve_cap { struct mad_reserve_cap {
struct mad_capability_common common; struct mad_capability_common common;
u32 type; __be32 type;
}; };
struct mad_migration_cap { struct mad_migration_cap {
struct mad_capability_common common; struct mad_capability_common common;
u32 ecl; __be32 ecl;
}; };
struct capabilities{ struct capabilities{
u32 flags; __be32 flags;
char name[SRP_MAX_LOC_LEN]; char name[SRP_MAX_LOC_LEN];
char loc[SRP_MAX_LOC_LEN]; char loc[SRP_MAX_LOC_LEN];
struct mad_migration_cap migration; struct mad_migration_cap migration;
@ -208,10 +208,10 @@ union viosrp_iu {
struct mad_adapter_info_data { struct mad_adapter_info_data {
char srp_version[8]; char srp_version[8];
char partition_name[96]; char partition_name[96];
u32 partition_number; __be32 partition_number;
u32 mad_version; __be32 mad_version;
u32 os_type; __be32 os_type;
u32 port_max_txu[8]; /* per-port maximum transfer */ __be32 port_max_txu[8]; /* per-port maximum transfer */
}; };
#endif #endif
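The point of switching these header fields from u16/u32/u64 to __be16/__be32/__be64 is static checking: sparse (make C=1) warns on any direct load or store that bypasses the cpu_to_be*()/be*_to_cpu() helpers, which is how remaining unconverted accesses in a series like this get flushed out. The __force cast on IU_data_ptr earlier is the deliberate escape hatch for the one value that round-trips through the hypervisor unmodified. A toy illustration of the annotation (hypothetical structure, not part of viosrp.h):

    struct demo_wire {
            __be32 type;                    /* big-endian on the wire */
            __be16 length;
    };

    static void demo_fill(struct demo_wire *w)
    {
            w->type   = cpu_to_be32(1);           /* correct */
            w->length = cpu_to_be16(sizeof(*w));  /* correct */
            /* w->type = 1;   <-- sparse would flag this assignment */
    }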


@ -708,6 +708,7 @@ struct lpfc_hba {
uint32_t cfg_multi_ring_type; uint32_t cfg_multi_ring_type;
uint32_t cfg_poll; uint32_t cfg_poll;
uint32_t cfg_poll_tmo; uint32_t cfg_poll_tmo;
uint32_t cfg_task_mgmt_tmo;
uint32_t cfg_use_msi; uint32_t cfg_use_msi;
uint32_t cfg_fcp_imax; uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_cpu_map;


@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \ { \
if (val >= minval && val <= maxval) {\ if (val >= minval && val <= maxval) {\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
"3053 lpfc_" #attr " changed from %d to %d\n", \ "3053 lpfc_" #attr \
vport->cfg_##attr, val); \ " changed from %d (x%x) to %d (x%x)\n", \
vport->cfg_##attr, vport->cfg_##attr, \
val, val); \
vport->cfg_##attr = val;\ vport->cfg_##attr = val;\
return 0;\ return 0;\
}\ }\
@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
# For [0], FCP commands are issued to Work Queues in a round robin fashion. # For [0], FCP commands are issued to Work Queues in a round robin fashion.
# For [1], FCP commands are issued to a Work Queue associated with the # For [1], FCP commands are issued to a Work Queue associated with the
# current CPU. # current CPU.
# It would be set to 1 by the driver if it's able to set up cpu affinity
# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
# roundrobin scheduling of FCP I/Os through WQs will be used.
*/ */
LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for " LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU"); "issuing commands [0] - Round Robin, [1] - Current CPU");
/* /*
@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
"Milliseconds driver will wait between polling FCP ring"); "Milliseconds driver will wait between polling FCP ring");
/* /*
# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
# to complete in seconds. Value range is [5,180], default value is 60.
*/
LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
"Maximum time to wait for task management commands to complete");
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature # support this feature
# 0 = MSI disabled # 0 = MSI disabled
@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_issue_reset, &dev_attr_issue_reset,
&dev_attr_lpfc_poll, &dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi, &dev_attr_lpfc_use_msi,
&dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_cpu_map,
@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_topology_init(phba, lpfc_topology); lpfc_topology_init(phba, lpfc_topology);
lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_enable_rrq_init(phba, lpfc_enable_rrq);


@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
} }
spin_unlock_irqrestore(&phba->ct_ev_lock, flags); spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb; iocb = &dd_data->context_un.iocb;
ndlp = iocb->ndlp; ndlp = iocb->ndlp;
rmp = iocb->rmp; rmp = iocb->rmp;
@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
int request_nseg; int request_nseg;
int reply_nseg; int reply_nseg;
struct bsg_job_data *dd_data; struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val; uint32_t creg_val;
int rc = 0; int rc = 0;
int iocb_stat; int iocb_stat;
@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
} }
iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (iocb_stat == IOCB_SUCCESS)
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */ return 0; /* done for now */
else if (iocb_stat == IOCB_BUSY) } else if (iocb_stat == IOCB_BUSY) {
rc = -EAGAIN; rc = -EAGAIN;
else } else {
rc = -EIO; rc = -EIO;
}
/* iocb failed so cleanup */ /* iocb failed so cleanup */
job->dd_data = NULL;
free_rmp: free_rmp:
lpfc_free_bsg_buffers(phba, rmp); lpfc_free_bsg_buffers(phba, rmp);
@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
} }
spin_unlock_irqrestore(&phba->ct_ev_lock, flags); spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
rsp = &rspiocbq->iocb; rsp = &rspiocbq->iocb;
pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
prsp = (struct lpfc_dmabuf *)pcmd->list.next; prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
struct lpfc_iocbq *cmdiocbq; struct lpfc_iocbq *cmdiocbq;
uint16_t rpi = 0; uint16_t rpi = 0;
struct bsg_job_data *dd_data; struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val; uint32_t creg_val;
int rc = 0; int rc = 0;
@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (rc == IOCB_SUCCESS) if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */ return 0; /* done for now */
else if (rc == IOCB_BUSY) } else if (rc == IOCB_BUSY) {
rc = -EAGAIN; rc = -EAGAIN;
else } else {
rc = -EIO; rc = -EIO;
}
/* iocb failed so cleanup */
job->dd_data = NULL;
linkdown_err: linkdown_err:
cmdiocbq->context1 = ndlp; cmdiocbq->context1 = ndlp;
lpfc_els_free_iocb(phba, cmdiocbq); lpfc_els_free_iocb(phba, cmdiocbq);
@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
struct lpfc_hba *phba = vport->phba; struct lpfc_hba *phba = vport->phba;
struct get_ct_event *event_req; struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply; struct get_ct_event_reply *event_reply;
struct lpfc_bsg_event *evt; struct lpfc_bsg_event *evt, *evt_next;
struct event_data *evt_dat = NULL; struct event_data *evt_dat = NULL;
unsigned long flags; unsigned long flags;
uint32_t rc = 0; uint32_t rc = 0;
@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply = (struct get_ct_event_reply *) event_reply = (struct get_ct_event_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp; job->reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags); spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) { list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) { if (evt->reg_id == event_req->ev_reg_id) {
if (list_empty(&evt->events_to_get)) if (list_empty(&evt->events_to_get))
break; break;
@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
} }
spin_unlock_irqrestore(&phba->ct_ev_lock, flags); spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
ndlp = dd_data->context_un.iocb.ndlp; ndlp = dd_data->context_un.iocb.ndlp;
cmp = cmdiocbq->context2; cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3; bmp = cmdiocbq->context3;
@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
int rc = 0; int rc = 0;
struct lpfc_nodelist *ndlp = NULL; struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data; struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val; uint32_t creg_val;
/* allocate our bsg tracking structure */ /* allocate our bsg tracking structure */
@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_SUCCESS) if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */ return 0; /* done for now */
}
/* iocb failed so cleanup */
job->dd_data = NULL;
issue_ct_rsp_exit: issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb); lpfc_sli_release_iocbq(phba, ctiocb);
@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* remove it from the txq queue and call cancel iocbs. * remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag * Otherwise, call abort iotag
*/ */
cmdiocb = dd_data->context_un.iocb.cmdiocbq; cmdiocb = dd_data->context_un.iocb.cmdiocbq;
spin_lock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O abort window is still open */
if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return -EAGAIN;
}
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) { list) {
if (check_iocb == cmdiocb) { if (check_iocb == cmdiocb) {
@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
} }
if (list_empty(&completions)) if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
spin_unlock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (!list_empty(&completions)) { if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions, lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT, IOSTAT_LOCAL_REJECT,
@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* remove it from the txq queue and call cancel iocbs. * remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag. * Otherwise, call abort iotag.
*/ */
cmdiocb = dd_data->context_un.menlo.cmdiocbq; cmdiocb = dd_data->context_un.menlo.cmdiocbq;
spin_lock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) { list) {
if (check_iocb == cmdiocb) { if (check_iocb == cmdiocb) {
@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
} }
if (list_empty(&completions)) if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
spin_unlock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (!list_empty(&completions)) { if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions, lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT, IOSTAT_LOCAL_REJECT,
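All three BSG paths in this file now follow one handshake: after a successful lpfc_sli_issue_iocb() the submitter sets LPFC_IO_CMD_OUTSTANDING under hbalock (but only if LPFC_IO_LIBDFC shows completion has not already released the iocb), the completion handler clears it, and lpfc_bsg_timeout() refuses to abort unless the bit is still set. Condensed to its essentials, using the same names as the hunks above:

    /* submit: open the abort window only if completion hasn't run yet */
    spin_lock_irqsave(&phba->hbalock, flags);
    if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)
            cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
    spin_unlock_irqrestore(&phba->hbalock, flags);

    /* completion: close the window before releasing job resources */
    spin_lock_irqsave(&phba->hbalock, flags);
    cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
    spin_unlock_irqrestore(&phba->hbalock, flags);

    /* timeout: abort only while the window is open */
    spin_lock_irqsave(&phba->hbalock, flags);
    if (!(cmdiocbq->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
            spin_unlock_irqrestore(&phba->hbalock, flags);
            return -EAGAIN;     /* completion won the race; nothing to do */
    }
    lpfc_sli_issue_abort_iotag(phba, pring, cmdiocbq);
    spin_unlock_irqrestore(&phba->hbalock, flags);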


@ -4437,6 +4437,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!ndlp) if (!ndlp)
return; return;
lpfc_issue_els_logo(vport, ndlp, 0); lpfc_issue_els_logo(vport, ndlp, 0);
mempool_free(pmb, phba->mbox_mem_pool);
} }
/* /*
@ -4456,7 +4457,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
int rc; int rc;
uint16_t rpi; uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"3366 RPI x%x needs to be "
"unregistered nlp_flag x%x "
"did x%x\n",
ndlp->nlp_rpi, ndlp->nlp_flag,
ndlp->nlp_DID);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) { if (mbox) {
/* SLI4 ports require the physical rpi value. */ /* SLI4 ports require the physical rpi value. */
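The one-line mempool_free() added to lpfc_nlp_logo_unreg() plugs a mailbox leak: a mailbox completion handler owns the LPFC_MBOXQ_t it is handed and must return it to the pool itself. The shape of the contract, as a sketch:

    /* sketch of a mailbox completion handler honoring the ownership rule */
    static void demo_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    {
            /* ... act on the completed mailbox ... */
            mempool_free(pmb, phba->mbox_mem_pool);   /* always give it back */
    }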


@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max); phba->sli4_hba.scsi_xri_max);
spin_lock_irq(&phba->scsi_buf_list_get_lock); spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock_irq(&phba->scsi_buf_list_put_lock); spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
spin_unlock_irq(&phba->scsi_buf_list_put_lock); spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock);
if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
} }
spin_lock_irq(&phba->scsi_buf_list_get_lock); spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock_irq(&phba->scsi_buf_list_put_lock); spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
spin_unlock_irq(&phba->scsi_buf_list_put_lock); spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock);
return 0; return 0;
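The spin_lock_irq() to spin_lock() change for the inner put-list lock is a correctness fix, not a style one: spin_unlock_irq() unconditionally re-enables interrupts, so releasing the inner lock with the _irq variant would turn interrupts back on while the outer scsi_buf_list_get_lock was still held. Once the outer lock has disabled interrupts, the inner lock needs only the plain variants. A sketch with placeholder locks standing in for the two list locks:

    static DEFINE_SPINLOCK(outer);
    static DEFINE_SPINLOCK(inner);

    /* wrong: the inner unlock re-enables IRQs under the outer lock */
    spin_lock_irq(&outer);
    spin_lock_irq(&inner);
    /* ... splice lists ... */
    spin_unlock_irq(&inner);            /* IRQs on again right here */
    spin_unlock_irq(&outer);

    /* right: the outer lock owns the IRQ state, the inner one is plain */
    spin_lock_irq(&outer);
    spin_lock(&inner);
    /* ... splice lists ... */
    spin_unlock(&inner);
    spin_unlock_irq(&outer);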
@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe; struct lpfc_mqe *mqe;
int longs; int longs;
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
/* Before proceed, wait for POST done and device ready */ /* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba); rc = lpfc_sli4_post_status_check(phba);
if (rc) if (rc)
@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct lpfc_mbox_ext_buf_ctx)); sizeof(struct lpfc_mbox_ext_buf_ctx));
INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
/*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
* MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
* All of the resources allocated
* for this Port are tied to these values.
*/
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
phba->max_vpi = LPFC_MAX_VPI; phba->max_vpi = LPFC_MAX_VPI;
/* This will be set to correct value after the read_config mbox */ /* This will be set to correct value after the read_config mbox */
@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.fcp_wq = NULL; phba->sli4_hba.fcp_wq = NULL;
} }
if (phba->pci_bar0_memmap_p) {
iounmap(phba->pci_bar0_memmap_p);
phba->pci_bar0_memmap_p = NULL;
}
if (phba->pci_bar2_memmap_p) {
iounmap(phba->pci_bar2_memmap_p);
phba->pci_bar2_memmap_p = NULL;
}
if (phba->pci_bar4_memmap_p) {
iounmap(phba->pci_bar4_memmap_p);
phba->pci_bar4_memmap_p = NULL;
}
/* Release FCP CQ mapping array */ /* Release FCP CQ mapping array */
if (phba->sli4_hba.fcp_cq_map != NULL) { if (phba->sli4_hba.fcp_cq_map != NULL) {
kfree(phba->sli4_hba.fcp_cq_map); kfree(phba->sli4_hba.fcp_cq_map);
@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
* particular PCI BARs regions is dependent on the type of * particular PCI BARs regions is dependent on the type of
* SLI4 device. * SLI4 device.
*/ */
if (pci_resource_start(pdev, 0)) { if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
phba->pci_bar0_map = pci_resource_start(pdev, 0); phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
bar0map_len = pci_resource_len(pdev, 0); bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
/* /*
* Map SLI4 PCI Config Space Register base to a kernel virtual * Map SLI4 PCI Config Space Register base to a kernel virtual
@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"registers.\n"); "registers.\n");
goto out; goto out;
} }
phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
/* Set up BAR0 PCI config space register memory map */ /* Set up BAR0 PCI config space register memory map */
lpfc_sli4_bar0_register_memmap(phba, if_type); lpfc_sli4_bar0_register_memmap(phba, if_type);
} else { } else {
@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
} }
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
(pci_resource_start(pdev, 2))) { (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
/* /*
* Map SLI4 if type 0 HBA Control Register base to a kernel * Map SLI4 if type 0 HBA Control Register base to a kernel
* virtual address and setup the registers. * virtual address and setup the registers.
*/ */
phba->pci_bar1_map = pci_resource_start(pdev, 2); phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
bar1map_len = pci_resource_len(pdev, 2); bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
phba->sli4_hba.ctrl_regs_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p =
ioremap(phba->pci_bar1_map, bar1map_len); ioremap(phba->pci_bar1_map, bar1map_len);
if (!phba->sli4_hba.ctrl_regs_memmap_p) { if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"ioremap failed for SLI4 HBA control registers.\n"); "ioremap failed for SLI4 HBA control registers.\n");
goto out_iounmap_conf; goto out_iounmap_conf;
} }
phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
lpfc_sli4_bar1_register_memmap(phba); lpfc_sli4_bar1_register_memmap(phba);
} }
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
(pci_resource_start(pdev, 4))) { (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
/* /*
* Map SLI4 if type 0 HBA Doorbell Register base to a kernel * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
* virtual address and setup the registers. * virtual address and setup the registers.
*/ */
phba->pci_bar2_map = pci_resource_start(pdev, 4); phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
bar2map_len = pci_resource_len(pdev, 4); bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
phba->sli4_hba.drbl_regs_memmap_p = phba->sli4_hba.drbl_regs_memmap_p =
ioremap(phba->pci_bar2_map, bar2map_len); ioremap(phba->pci_bar2_map, bar2map_len);
if (!phba->sli4_hba.drbl_regs_memmap_p) { if (!phba->sli4_hba.drbl_regs_memmap_p) {
@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"ioremap failed for SLI4 HBA doorbell registers.\n"); "ioremap failed for SLI4 HBA doorbell registers.\n");
goto out_iounmap_ctrl; goto out_iounmap_ctrl;
} }
phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
if (error) if (error)
goto out_iounmap_all; goto out_iounmap_all;
@ -8405,7 +8389,8 @@ static int
lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
{ {
int i, idx, saved_chann, used_chann, cpu, phys_id; int i, idx, saved_chann, used_chann, cpu, phys_id;
int max_phys_id, num_io_channel, first_cpu; int max_phys_id, min_phys_id;
int num_io_channel, first_cpu, chan;
struct lpfc_vector_map_info *cpup; struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86 #ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo; struct cpuinfo_x86 *cpuinfo;
@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
phba->sli4_hba.num_present_cpu)); phba->sli4_hba.num_present_cpu));
max_phys_id = 0; max_phys_id = 0;
min_phys_id = 0xff;
phys_id = 0; phys_id = 0;
num_io_channel = 0; num_io_channel = 0;
first_cpu = LPFC_VECTOR_MAP_EMPTY; first_cpu = LPFC_VECTOR_MAP_EMPTY;
@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
if (cpup->phys_id > max_phys_id) if (cpup->phys_id > max_phys_id)
max_phys_id = cpup->phys_id; max_phys_id = cpup->phys_id;
if (cpup->phys_id < min_phys_id)
min_phys_id = cpup->phys_id;
cpup++; cpup++;
} }
phys_id = min_phys_id;
/* Now associate the HBA vectors with specific CPUs */ /* Now associate the HBA vectors with specific CPUs */
for (idx = 0; idx < vectors; idx++) { for (idx = 0; idx < vectors; idx++) {
cpup = phba->sli4_hba.cpu_map; cpup = phba->sli4_hba.cpu_map;
@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
for (i = 1; i < max_phys_id; i++) { for (i = 1; i < max_phys_id; i++) {
phys_id++; phys_id++;
if (phys_id > max_phys_id) if (phys_id > max_phys_id)
phys_id = 0; phys_id = min_phys_id;
cpu = lpfc_find_next_cpu(phba, phys_id); cpu = lpfc_find_next_cpu(phba, phys_id);
if (cpu == LPFC_VECTOR_MAP_EMPTY) if (cpu == LPFC_VECTOR_MAP_EMPTY)
continue; continue;
goto found; goto found;
} }
/* Use round robin for scheduling */
phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
chan = 0;
cpup = phba->sli4_hba.cpu_map;
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
cpup->channel_id = chan;
cpup++;
chan++;
if (chan >= phba->cfg_fcp_io_channel)
chan = 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3329 Cannot set affinity:" "3329 Cannot set affinity:"
"Error mapping vector %d (%d)\n", "Error mapping vector %d (%d)\n",
@ -8503,7 +8504,7 @@ found:
/* Spread vector mapping across multiple physical CPU nodes */ /* Spread vector mapping across multiple physical CPU nodes */
phys_id++; phys_id++;
if (phys_id > max_phys_id) if (phys_id > max_phys_id)
phys_id = 0; phys_id = min_phys_id;
} }
/* /*
@ -8513,7 +8514,7 @@ found:
* Base the remaining IO channel assigned, to IO channels already * Base the remaining IO channel assigned, to IO channels already
* assigned to other CPUs on the same phys_id. * assigned to other CPUs on the same phys_id.
*/ */
for (i = 0; i <= max_phys_id; i++) { for (i = min_phys_id; i <= max_phys_id; i++) {
/* /*
* If there are no io channels already mapped to * If there are no io channels already mapped to
* this phys_id, just round robin thru the io_channels. * this phys_id, just round robin thru the io_channels.
@ -8595,10 +8596,11 @@ out:
if (num_io_channel != phba->sli4_hba.num_present_cpu) if (num_io_channel != phba->sli4_hba.num_present_cpu)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set affinity mismatch:" "3333 Set affinity mismatch:"
"%d chann != %d cpus: %d vactors\n", "%d chann != %d cpus: %d vectors\n",
num_io_channel, phba->sli4_hba.num_present_cpu, num_io_channel, phba->sli4_hba.num_present_cpu,
vectors); vectors);
/* Enable using cpu affinity for scheduling */
phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
return 1; return 1;
} }
@ -8689,9 +8691,12 @@ enable_msix_vectors:
cfg_fail_out: cfg_fail_out:
/* free the irq already requested */ /* free the irq already requested */
for (--index; index >= 0; index--) for (--index; index >= 0; index--) {
irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
vector, NULL);
free_irq(phba->sli4_hba.msix_entries[index].vector, free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]); &phba->sli4_hba.fcp_eq_hdl[index]);
}
msi_fail_out: msi_fail_out:
/* Unconfigure MSI-X capability structure */ /* Unconfigure MSI-X capability structure */
@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
int index; int index;
/* Free up MSI-X multi-message vectors */ /* Free up MSI-X multi-message vectors */
for (index = 0; index < phba->cfg_fcp_io_channel; index++) for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
vector, NULL);
free_irq(phba->sli4_hba.msix_entries[index].vector, free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]); &phba->sli4_hba.fcp_eq_hdl[index]);
}
/* Disable MSI-X */ /* Disable MSI-X */
pci_disable_msix(phba->pcidev); pci_disable_msix(phba->pcidev);
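Because the MSI-X setup path now publishes an affinity hint for every vector, both the error-unwind path and lpfc_sli4_disable_msix() clear that hint (a NULL mask) before free_irq(), so no stale hint outlives the handler. The teardown pairing, reduced to a sketch with names shortened from the driver's fields:

    /* every vector that was given an affinity hint must drop it on teardown */
    for (index = 0; index < vectors; index++) {
            irq_set_affinity_hint(msix_entries[index].vector, NULL);
            free_irq(msix_entries[index].vector, &fcp_eq_hdl[index]);
    }
    pci_disable_msix(pcidev);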


@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
/* get all SCSI buffers need to repost to a local list */ /* get all SCSI buffers need to repost to a local list */
spin_lock_irq(&phba->scsi_buf_list_get_lock); spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock_irq(&phba->scsi_buf_list_put_lock); spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
spin_unlock_irq(&phba->scsi_buf_list_put_lock); spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock);
/* post the list of scsi buffer sgls to port if available */ /* post the list of scsi buffer sgls to port if available */
@ -1000,9 +1000,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
} }
memset(psb->data, 0, phba->cfg_sg_dma_buf_size); memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/* Page alignment is CRITICAL, double check to be sure */ /*
if (((unsigned long)(psb->data) & * 4K Page alignment is CRITICAL to BlockGuard, double check
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { * to be sure.
*/
if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool, pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle); psb->data, psb->dma_handle);
kfree(psb); kfree(psb);
@ -1134,22 +1137,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{ {
struct lpfc_scsi_buf * lpfc_cmd = NULL; struct lpfc_scsi_buf * lpfc_cmd = NULL;
struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
unsigned long gflag = 0; unsigned long iflag = 0;
unsigned long pflag = 0;
spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
list); list);
if (!lpfc_cmd) { if (!lpfc_cmd) {
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put, list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get); &phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
list_remove_head(scsi_buf_list_get, lpfc_cmd, list_remove_head(scsi_buf_list_get, lpfc_cmd,
struct lpfc_scsi_buf, list); struct lpfc_scsi_buf, list);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); spin_unlock(&phba->scsi_buf_list_put_lock);
} }
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
return lpfc_cmd; return lpfc_cmd;
} }
/** /**
@ -1167,11 +1169,10 @@ static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{ {
struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
unsigned long gflag = 0; unsigned long iflag = 0;
unsigned long pflag = 0;
int found = 0; int found = 0;
spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) { &phba->lpfc_scsi_buf_list_get, list) {
if (lpfc_test_rrq_active(phba, ndlp, if (lpfc_test_rrq_active(phba, ndlp,
@ -1182,11 +1183,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
break; break;
} }
if (!found) { if (!found) {
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put, list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get); &phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); spin_unlock(&phba->scsi_buf_list_put_lock);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) { &phba->lpfc_scsi_buf_list_get, list) {
if (lpfc_test_rrq_active( if (lpfc_test_rrq_active(
@ -1197,7 +1198,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
break; break;
} }
} }
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found) if (!found)
return NULL; return NULL;
return lpfc_cmd; return lpfc_cmd;
@ -3966,11 +3967,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
/* /*
* Check SLI validation that all the transfer was actually done * Check SLI validation that all the transfer was actually done
* (fcpi_parm should be zero). * (fcpi_parm should be zero). Apply check only to reads.
*/ */
} else if (fcpi_parm) { } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
"9029 FCP Data Transfer Check Error: " "9029 FCP Read Check Error Data: "
"x%x x%x x%x x%x x%x\n", "x%x x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl), be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId), be32_to_cpu(fcprsp->rspResId),
@ -4342,6 +4343,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
char tag[2]; char tag[2];
uint8_t *ptr; uint8_t *ptr;
bool sli4; bool sli4;
uint32_t fcpdl;
if (!pnode || !NLP_CHK_NODE_ACT(pnode)) if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return; return;
@ -4389,8 +4391,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
iocb_cmd->ulpPU = PARM_READ_CHECK; iocb_cmd->ulpPU = PARM_READ_CHECK;
if (vport->cfg_first_burst_size && if (vport->cfg_first_burst_size &&
(pnode->nlp_flag & NLP_FIRSTBURST)) { (pnode->nlp_flag & NLP_FIRSTBURST)) {
piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl = scsi_bufflen(scsi_cmnd);
vport->cfg_first_burst_size; if (fcpdl < vport->cfg_first_burst_size)
piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
else
piocbq->iocb.un.fcpi.fcpi_XRdy =
vport->cfg_first_burst_size;
} }
fcp_cmnd->fcpCntl3 = WRITE_DATA; fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++; phba->fc4OutputRequests++;
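The first-burst change above amounts to a clamp: when the target negotiated first burst (NLP_FIRSTBURST) the driver must not advertise more ready data than the command actually carries. The same logic, as a one-expression sketch assuming the 32-bit fields shown in the hunk:

    piocbq->iocb.un.fcpi.fcpi_XRdy =
            min_t(uint32_t, scsi_bufflen(scsi_cmnd),
                  vport->cfg_first_burst_size);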
@ -4878,6 +4884,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock; goto out_unlock;
} }
/* Indicate the IO is being aborted by the driver. */
iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
/* /*
* The scsi command can not be in txq and it is in flight because the * The scsi command can not be in txq and it is in flight because the
* pCmd is still pointing at the SCSI command we have to abort. There * pCmd is still pointing at the SCSI command we have to abort. There
@ -5006,7 +5015,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
if (lpfc_cmd == NULL) if (lpfc_cmd == NULL)
return FAILED; return FAILED;
lpfc_cmd->timeout = 60; lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata; lpfc_cmd->rdata = rdata;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,


@ -9831,6 +9831,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abort_cmd) != 0) abort_cmd) != 0)
continue; continue;
/*
* If the iocbq is already being aborted, don't take a second
* action, but do count it.
*/
if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
continue;
/* issue ABTS for this IOCB based on iotag */ /* issue ABTS for this IOCB based on iotag */
abtsiocb = lpfc_sli_get_iocbq(phba); abtsiocb = lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) { if (abtsiocb == NULL) {
@ -9838,6 +9845,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue; continue;
} }
/* indicate the IO is being aborted by the driver. */
iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
cmd = &iocbq->iocb; cmd = &iocbq->iocb;
abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@ -9847,7 +9857,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass; abtsiocb->iocb.ulpClass = cmd->ulpClass;
abtsiocb->vport = phba->pport; abtsiocb->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
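Marking the victim iocb with LPFC_DRIVER_ABORTED before the ABTS goes out lets every abort path recognize work already in progress, which is what the new skip in the loop above relies on. The guard, in isolation:

    /* skip iocbs whose first ABTS is still outstanding */
    if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
            continue;
    iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;    /* claim before issuing ABTS */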
@ -12233,7 +12243,6 @@ static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{ {
struct pci_dev *pdev; struct pci_dev *pdev;
unsigned long bar_map, bar_map_len;
if (!phba->pcidev) if (!phba->pcidev)
return NULL; return NULL;
@ -12242,25 +12251,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
switch (pci_barset) { switch (pci_barset) {
case WQ_PCI_BAR_0_AND_1: case WQ_PCI_BAR_0_AND_1:
if (!phba->pci_bar0_memmap_p) {
bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
}
return phba->pci_bar0_memmap_p; return phba->pci_bar0_memmap_p;
case WQ_PCI_BAR_2_AND_3: case WQ_PCI_BAR_2_AND_3:
if (!phba->pci_bar2_memmap_p) {
bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
}
return phba->pci_bar2_memmap_p; return phba->pci_bar2_memmap_p;
case WQ_PCI_BAR_4_AND_5: case WQ_PCI_BAR_4_AND_5:
if (!phba->pci_bar4_memmap_p) {
bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
}
return phba->pci_bar4_memmap_p; return phba->pci_bar4_memmap_p;
default: default:
break; break;
@ -15808,7 +15802,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{ {
struct lpfc_fcf_pri *fcf_pri; struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP, lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book " "2762 FCF (x%x) reached driver's book "
@ -15818,7 +15812,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
} }
/* Clear the eligible FCF record index bmask */ /* Clear the eligible FCF record index bmask */
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
list) {
if (fcf_pri->fcf_rec.fcf_index == fcf_index) { if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
list_del_init(&fcf_pri->list); list_del_init(&fcf_pri->list);
break; break;


@ -58,7 +58,7 @@ struct lpfc_iocbq {
IOCB_t iocb; /* IOCB cmd */ IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */ uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint16_t iocb_flag; uint32_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ #define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ #define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ #define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
@ -73,11 +73,11 @@ struct lpfc_iocbq {
#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ #define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ #define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ #define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14 #define LPFC_FIP_ELS_ID_SHIFT 14
uint8_t rsvd2;
uint32_t drvrTimeout; /* driver timeout in seconds */ uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */ uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */ struct lpfc_vport *vport;/* virtual port pointer */
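Widening iocb_flag from u16 to u32 is forced by simple arithmetic: LPFC_FIP_ELS_ID_MASK already occupied bits 14-15 (0xc000), so the new LPFC_IO_CMD_OUTSTANDING bit (0x2000) consumed the last free bit of a 16-bit word. Growing the field (and retiring the adjacent rsvd2 pad byte) restores headroom for future flags:

    /* with a u16, every bit was spoken for:
     *   0x0001 .. 0x2000  individual LPFC_IO_* flags
     *   0xc000            LPFC_FIP_ELS_ID_MASK (ELS_ID, bits 14-15)
     */
    uint32_t iocb_flag = 0;
    iocb_flag |= LPFC_IO_CMD_OUTSTANDING;               /* last free u16 bit */
    iocb_flag |= (2 << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK;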


@ -523,7 +523,7 @@ struct lpfc_sli4_hba {
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
uint8_t fw_func_mode; /* FW function protocol mode */ uint32_t fw_func_mode; /* FW function protocol mode */
uint32_t ulp0_mode; /* ULP0 protocol mode */ uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */ uint32_t ulp1_mode; /* ULP1 protocol mode */


@ -18,7 +18,7 @@
* included with this package. * * included with this package. *
*******************************************************************/ *******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.41" #define LPFC_DRIVER_VERSION "8.3.42"
#define LPFC_DRIVER_NAME "lpfc" #define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */ /* Used for SLI 2/3 */


@ -33,9 +33,9 @@
/* /*
* MegaRAID SAS Driver meta data * MegaRAID SAS Driver meta data
*/ */
#define MEGASAS_VERSION "06.600.18.00-rc1" #define MEGASAS_VERSION "06.700.06.00-rc1"
#define MEGASAS_RELDATE "May. 15, 2013" #define MEGASAS_RELDATE "Aug. 31, 2013"
#define MEGASAS_EXT_VERSION "Wed. May. 15 17:00:00 PDT 2013" #define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013"
/* /*
* Device IDs * Device IDs
@ -170,6 +170,7 @@
#define MR_DCMD_CTRL_GET_INFO 0x01010000 #define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000 #define MR_DCMD_LD_GET_LIST 0x03010000
#define MR_DCMD_LD_LIST_QUERY 0x03010100
#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
#define MR_FLUSH_CTRL_CACHE 0x01 #define MR_FLUSH_CTRL_CACHE 0x01
@ -345,6 +346,15 @@ enum MR_PD_QUERY_TYPE {
MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
}; };
enum MR_LD_QUERY_TYPE {
MR_LD_QUERY_TYPE_ALL = 0,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
};
#define MR_EVT_CFG_CLEARED 0x0004 #define MR_EVT_CFG_CLEARED 0x0004
#define MR_EVT_LD_STATE_CHANGE 0x0051 #define MR_EVT_LD_STATE_CHANGE 0x0051
#define MR_EVT_PD_INSERTED 0x005b #define MR_EVT_PD_INSERTED 0x005b
@ -435,6 +445,14 @@ struct MR_LD_LIST {
} ldList[MAX_LOGICAL_DRIVES]; } ldList[MAX_LOGICAL_DRIVES];
} __packed; } __packed;
struct MR_LD_TARGETID_LIST {
u32 size;
u32 count;
u8 pad[3];
u8 targetId[MAX_LOGICAL_DRIVES];
};
/* /*
* SAS controller properties * SAS controller properties
*/ */
@ -474,21 +492,39 @@ struct megasas_ctrl_prop {
* a bit in the following structure. * a bit in the following structure.
*/ */
struct { struct {
u32 copyBackDisabled : 1; #if defined(__BIG_ENDIAN_BITFIELD)
u32 SMARTerEnabled : 1; u32 reserved:18;
u32 prCorrectUnconfiguredAreas : 1; u32 enableJBOD:1;
u32 useFdeOnly : 1; u32 disableSpinDownHS:1;
u32 disableNCQ : 1; u32 allowBootWithPinnedCache:1;
u32 SSDSMARTerEnabled : 1; u32 disableOnlineCtrlReset:1;
u32 SSDPatrolReadEnabled : 1; u32 enableSecretKeyControl:1;
u32 enableSpinDownUnconfigured : 1; u32 autoEnhancedImport:1;
u32 autoEnhancedImport : 1; u32 enableSpinDownUnconfigured:1;
u32 enableSecretKeyControl : 1; u32 SSDPatrolReadEnabled:1;
u32 disableOnlineCtrlReset : 1; u32 SSDSMARTerEnabled:1;
u32 allowBootWithPinnedCache : 1; u32 disableNCQ:1;
u32 disableSpinDownHS : 1; u32 useFdeOnly:1;
u32 enableJBOD : 1; u32 prCorrectUnconfiguredAreas:1;
u32 reserved :18; u32 SMARTerEnabled:1;
u32 copyBackDisabled:1;
#else
u32 copyBackDisabled:1;
u32 SMARTerEnabled:1;
u32 prCorrectUnconfiguredAreas:1;
u32 useFdeOnly:1;
u32 disableNCQ:1;
u32 SSDSMARTerEnabled:1;
u32 SSDPatrolReadEnabled:1;
u32 enableSpinDownUnconfigured:1;
u32 autoEnhancedImport:1;
u32 enableSecretKeyControl:1;
u32 disableOnlineCtrlReset:1;
u32 allowBootWithPinnedCache:1;
u32 disableSpinDownHS:1;
u32 enableJBOD:1;
u32 reserved:18;
#endif
} OnOffProperties; } OnOffProperties;
u8 autoSnapVDSpace; u8 autoSnapVDSpace;
u8 viewSpace; u8 viewSpace;
@ -802,6 +838,30 @@ struct megasas_ctrl_info {
u16 cacheMemorySize; /*7A2h */ u16 cacheMemorySize; /*7A2h */
struct { /*7A4h */ struct { /*7A4h */
#if defined(__BIG_ENDIAN_BITFIELD)
u32 reserved:11;
u32 supportUnevenSpans:1;
u32 dedicatedHotSparesLimited:1;
u32 headlessMode:1;
u32 supportEmulatedDrives:1;
u32 supportResetNow:1;
u32 realTimeScheduler:1;
u32 supportSSDPatrolRead:1;
u32 supportPerfTuning:1;
u32 disableOnlinePFKChange:1;
u32 supportJBOD:1;
u32 supportBootTimePFKChange:1;
u32 supportSetLinkSpeed:1;
u32 supportEmergencySpares:1;
u32 supportSuspendResumeBGops:1;
u32 blockSSDWriteCacheChange:1;
u32 supportShieldState:1;
u32 supportLdBBMInfo:1;
u32 supportLdPIType3:1;
u32 supportLdPIType2:1;
u32 supportLdPIType1:1;
u32 supportPIcontroller:1;
#else
u32 supportPIcontroller:1; u32 supportPIcontroller:1;
u32 supportLdPIType1:1; u32 supportLdPIType1:1;
u32 supportLdPIType2:1; u32 supportLdPIType2:1;
@ -827,6 +887,7 @@ struct megasas_ctrl_info {
u32 supportUnevenSpans:1; u32 supportUnevenSpans:1;
u32 reserved:11; u32 reserved:11;
#endif
} adapterOperations2; } adapterOperations2;
u8 driverVersion[32]; /*7A8h */ u8 driverVersion[32]; /*7A8h */
@ -863,7 +924,7 @@ struct megasas_ctrl_info {
* =============================== * ===============================
*/ */
#define MEGASAS_MAX_PD_CHANNELS 2 #define MEGASAS_MAX_PD_CHANNELS 2
#define MEGASAS_MAX_LD_CHANNELS 2 #define MEGASAS_MAX_LD_CHANNELS 1
#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ #define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
MEGASAS_MAX_LD_CHANNELS) MEGASAS_MAX_LD_CHANNELS)
#define MEGASAS_MAX_DEV_PER_CHANNEL 128 #define MEGASAS_MAX_DEV_PER_CHANNEL 128
@ -1051,9 +1112,15 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES { typedef union _MFI_CAPABILITIES {
struct { struct {
#if defined(__BIG_ENDIAN_BITFIELD)
u32 reserved:30;
u32 support_additional_msix:1;
u32 support_fp_remote_lun:1;
#else
u32 support_fp_remote_lun:1; u32 support_fp_remote_lun:1;
u32 support_additional_msix:1; u32 support_additional_msix:1;
u32 reserved:30; u32 reserved:30;
#endif
} mfi_capabilities; } mfi_capabilities;
u32 reg; u32 reg;
} MFI_CAPABILITIES; } MFI_CAPABILITIES;
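These #if defined(__BIG_ENDIAN_BITFIELD) mirrors are the standard way to keep C bitfields matching a firmware-defined layout: bitfield allocation order is implementation-defined, and on the ABIs Linux runs on it follows byte order, so a structure declared low-bit-first for little-endian must list its members in reverse for big-endian hosts such as the ppc64 machines this series enables. A minimal illustration with a hypothetical one-byte register (not an LSI layout):

    struct demo_reg {
    #if defined(__BIG_ENDIAN_BITFIELD)
            u8 reserved:6;
            u8 b:1;
            u8 a:1;         /* 'a' still occupies bit 0 of the byte */
    #else
            u8 a:1;         /* bit 0 on little-endian */
            u8 b:1;
            u8 reserved:6;
    #endif
    };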
@ -1656,4 +1723,16 @@ struct megasas_mgmt_info {
int max_index; int max_index;
}; };
u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
#endif /*LSI_MEGARAID_SAS_H */ #endif /*LSI_MEGARAID_SAS_H */


@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* *
* FILE: megaraid_sas_base.c * FILE: megaraid_sas_base.c
* Version : 06.600.18.00-rc1 * Version : 06.700.06.00-rc1
* *
* Authors: LSI Corporation * Authors: LSI Corporation
* Sreenivas Bagalkote * Sreenivas Bagalkote
@ -92,6 +92,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance); static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance, static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word); u32 seq_num, u32 class_locale_word);
@ -374,13 +376,11 @@ static int
megasas_check_reset_xscale(struct megasas_instance *instance, megasas_check_reset_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs) struct megasas_register_set __iomem *regs)
{ {
u32 consumer;
consumer = *instance->consumer;
if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
(*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) { (le32_to_cpu(*instance->consumer) ==
MEGASAS_ADPRESET_INPROG_SIGN))
return 1; return 1;
}
return 0; return 0;
} }
@ -629,9 +629,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
{ {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&instance->hba_lock, flags); spin_lock_irqsave(&instance->hba_lock, flags);
writel(0, &(regs)->inbound_high_queue_port); writel(upper_32_bits(frame_phys_addr),
writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_high_queue_port);
&(regs)->inbound_low_queue_port); writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags); spin_unlock_irqrestore(&instance->hba_lock, flags);
} }
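Writing the real upper dword (instead of the old hard-coded 0) is what lets skinny-firmware frames live above 4GB. The upper_32_bits()/lower_32_bits() helpers are the right tool because a plain ">> 32" is undefined when dma_addr_t is itself only 32 bits wide:

    dma_addr_t addr = frame_phys_addr;          /* may or may not exceed 4GB */
    u32 hi = upper_32_bits(addr);               /* 0 on a 32-bit dma_addr_t */
    u32 lo = lower_32_bits(addr);

    writel(hi, &regs->inbound_high_queue_port);
    writel(lo | (frame_count << 1) | 1, &regs->inbound_low_queue_port);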
@ -879,8 +880,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
struct megasas_header *frame_hdr = &cmd->frame->hdr; struct megasas_header *frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF; frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
/* /*
* Issue the frame using inbound queue port * Issue the frame using inbound queue port
@ -944,10 +945,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
*/ */
abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd = MFI_CMD_ABORT;
abort_fr->cmd_status = 0xFF; abort_fr->cmd_status = 0xFF;
abort_fr->flags = 0; abort_fr->flags = cpu_to_le16(0);
abort_fr->abort_context = cmd_to_abort->index; abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; abort_fr->abort_mfi_phys_addr_lo =
abort_fr->abort_mfi_phys_addr_hi = 0; cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
abort_fr->abort_mfi_phys_addr_hi =
cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
cmd->sync_cmd = 1; cmd->sync_cmd = 1;
cmd->cmd_status = 0xFF; cmd->cmd_status = 0xFF;
@ -986,8 +989,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) { if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) { scsi_for_each_sg(scp, os_sgl, sge_count, i) {
mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
} }
} }
return sge_count; return sge_count;
@ -1015,8 +1018,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) { if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) { scsi_for_each_sg(scp, os_sgl, sge_count, i) {
mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
} }
} }
return sge_count; return sge_count;
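
These SGL stores only stay honest if the on-wire fields are declared with little-endian types, so sparse ("make C=1") can flag a missed conversion. An illustrative layout (the real sge64 definition lives in the driver header, not here):

        #include <linux/types.h>
        #include <asm/byteorder.h>

        struct example_sge64 {
                __le64 phys_addr;
                __le32 length;
        } __attribute__((packed));

        static void fill_sge64(struct example_sge64 *sge, dma_addr_t addr, u32 len)
        {
                sge->phys_addr = cpu_to_le64(addr); /* a bare "= addr" trips sparse */
                sge->length = cpu_to_le32(len);
        }
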
@ -1043,10 +1046,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
if (sge_count) { if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) { scsi_for_each_sg(scp, os_sgl, sge_count, i) {
mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); mfi_sgl->sge_skinny[i].length =
cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge_skinny[i].phys_addr = mfi_sgl->sge_skinny[i].phys_addr =
sg_dma_address(os_sgl); cpu_to_le64(sg_dma_address(os_sgl));
mfi_sgl->sge_skinny[i].flag = 0; mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
} }
} }
return sge_count; return sge_count;
@ -1155,8 +1159,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->cdb_len = scp->cmd_len; pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0; pthru->timeout = 0;
pthru->pad_0 = 0; pthru->pad_0 = 0;
pthru->flags = flags; pthru->flags = cpu_to_le16(flags);
pthru->data_xfer_len = scsi_bufflen(scp); pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
@ -1168,18 +1172,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
if ((scp->request->timeout / HZ) > 0xFFFF) if ((scp->request->timeout / HZ) > 0xFFFF)
pthru->timeout = 0xFFFF; pthru->timeout = 0xFFFF;
else else
pthru->timeout = scp->request->timeout / HZ; pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
} }
/* /*
* Construct SGL * Construct SGL
*/ */
if (instance->flag_ieee == 1) { if (instance->flag_ieee == 1) {
pthru->flags |= MFI_FRAME_SGL64; pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
pthru->sge_count = megasas_make_sgl_skinny(instance, scp, pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
&pthru->sgl); &pthru->sgl);
} else if (IS_DMA64) { } else if (IS_DMA64) {
pthru->flags |= MFI_FRAME_SGL64; pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
pthru->sge_count = megasas_make_sgl64(instance, scp, pthru->sge_count = megasas_make_sgl64(instance, scp,
&pthru->sgl); &pthru->sgl);
} else } else
@ -1196,8 +1200,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Sense info specific * Sense info specific
*/ */
pthru->sense_len = SCSI_SENSE_BUFFERSIZE; pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
pthru->sense_buf_phys_addr_hi = 0; pthru->sense_buf_phys_addr_hi =
pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
pthru->sense_buf_phys_addr_lo =
cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
/* /*
* Compute the total number of frames this command consumes. FW uses * Compute the total number of frames this command consumes. FW uses
@ -1248,7 +1254,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
ldio->timeout = 0; ldio->timeout = 0;
ldio->reserved_0 = 0; ldio->reserved_0 = 0;
ldio->pad_0 = 0; ldio->pad_0 = 0;
ldio->flags = flags; ldio->flags = cpu_to_le16(flags);
ldio->start_lba_hi = 0; ldio->start_lba_hi = 0;
ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
@ -1256,52 +1262,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
* 6-byte READ(0x08) or WRITE(0x0A) cdb * 6-byte READ(0x08) or WRITE(0x0A) cdb
*/ */
if (scp->cmd_len == 6) { if (scp->cmd_len == 6) {
ldio->lba_count = (u32) scp->cmnd[4]; ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; ((u32) scp->cmnd[2] << 8) |
(u32) scp->cmnd[3]);
ldio->start_lba_lo &= 0x1FFFFF; ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
} }
/* /*
* 10-byte READ(0x28) or WRITE(0x2A) cdb * 10-byte READ(0x28) or WRITE(0x2A) cdb
*/ */
else if (scp->cmd_len == 10) { else if (scp->cmd_len == 10) {
ldio->lba_count = (u32) scp->cmnd[8] | ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
((u32) scp->cmnd[7] << 8); ((u32) scp->cmnd[7] << 8));
ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[3] << 16) |
((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; ((u32) scp->cmnd[4] << 8) |
(u32) scp->cmnd[5]);
} }
/* /*
* 12-byte READ(0xA8) or WRITE(0xAA) cdb * 12-byte READ(0xA8) or WRITE(0xAA) cdb
*/ */
else if (scp->cmd_len == 12) { else if (scp->cmd_len == 12) {
ldio->lba_count = ((u32) scp->cmnd[6] << 24) | ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
((u32) scp->cmnd[7] << 16) | ((u32) scp->cmnd[7] << 16) |
((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; ((u32) scp->cmnd[8] << 8) |
(u32) scp->cmnd[9]);
ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[3] << 16) |
((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; ((u32) scp->cmnd[4] << 8) |
(u32) scp->cmnd[5]);
} }
/* /*
* 16-byte READ(0x88) or WRITE(0x8A) cdb * 16-byte READ(0x88) or WRITE(0x8A) cdb
*/ */
else if (scp->cmd_len == 16) { else if (scp->cmd_len == 16) {
ldio->lba_count = ((u32) scp->cmnd[10] << 24) | ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
((u32) scp->cmnd[11] << 16) | ((u32) scp->cmnd[11] << 16) |
((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; ((u32) scp->cmnd[12] << 8) |
(u32) scp->cmnd[13]);
ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
((u32) scp->cmnd[7] << 16) | ((u32) scp->cmnd[7] << 16) |
((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; ((u32) scp->cmnd[8] << 8) |
(u32) scp->cmnd[9]);
ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[3] << 16) |
((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; ((u32) scp->cmnd[4] << 8) |
(u32) scp->cmnd[5]);
} }
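
Two byte orders are in play here: the SCSI CDB carries the LBA big-endian, while the MFI frame wants it little-endian. The open-coded shifts assemble a CPU-order value first; the same step could be written with the kernel's unaligned big-endian helpers, as in this illustrative sketch:

        #include <linux/types.h>
        #include <asm/unaligned.h>

        /* 10-byte CDB: bytes 2..5 hold the LBA, big-endian on the wire. */
        static __le32 cdb10_lba_to_frame(const u8 *cdb)
        {
                return cpu_to_le32(get_unaligned_be32(&cdb[2]));
        }
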
@ -1309,11 +1322,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Construct SGL * Construct SGL
*/ */
if (instance->flag_ieee) { if (instance->flag_ieee) {
ldio->flags |= MFI_FRAME_SGL64; ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
ldio->sge_count = megasas_make_sgl_skinny(instance, scp, ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
&ldio->sgl); &ldio->sgl);
} else if (IS_DMA64) { } else if (IS_DMA64) {
ldio->flags |= MFI_FRAME_SGL64; ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
} else } else
ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
@ -1329,7 +1342,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
*/ */
ldio->sense_len = SCSI_SENSE_BUFFERSIZE; ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
ldio->sense_buf_phys_addr_hi = 0; ldio->sense_buf_phys_addr_hi = 0;
ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
/* /*
* Compute the total number of frames this command consumes. FW uses * Compute the total number of frames this command consumes. FW uses
@ -1400,20 +1413,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
ldio = (struct megasas_io_frame *)cmd->frame; ldio = (struct megasas_io_frame *)cmd->frame;
mfi_sgl = &ldio->sgl; mfi_sgl = &ldio->sgl;
sgcount = ldio->sge_count; sgcount = ldio->sge_count;
printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount); printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
} }
else { else {
pthru = (struct megasas_pthru_frame *) cmd->frame; pthru = (struct megasas_pthru_frame *) cmd->frame;
mfi_sgl = &pthru->sgl; mfi_sgl = &pthru->sgl;
sgcount = pthru->sge_count; sgcount = pthru->sge_count;
printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount); printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
} }
if(megasas_dbg_lvl & MEGASAS_DBG_LVL){ if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
for (n = 0; n < sgcount; n++){ for (n = 0; n < sgcount; n++){
if (IS_DMA64) if (IS_DMA64)
printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ; printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
le32_to_cpu(mfi_sgl->sge64[n].length),
le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
else else
printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ; printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
le32_to_cpu(mfi_sgl->sge32[n].length),
le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
} }
} }
printk(KERN_ERR "\n"); printk(KERN_ERR "\n");
@ -1674,11 +1699,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
spin_lock_irqsave(&instance->completion_lock, flags); spin_lock_irqsave(&instance->completion_lock, flags);
producer = *instance->producer; producer = le32_to_cpu(*instance->producer);
consumer = *instance->consumer; consumer = le32_to_cpu(*instance->consumer);
while (consumer != producer) { while (consumer != producer) {
context = instance->reply_queue[consumer]; context = le32_to_cpu(instance->reply_queue[consumer]);
if (context >= instance->max_fw_cmds) { if (context >= instance->max_fw_cmds) {
printk(KERN_ERR "Unexpected context value %x\n", printk(KERN_ERR "Unexpected context value %x\n",
context); context);
@ -1695,7 +1720,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
} }
} }
*instance->consumer = producer; *instance->consumer = cpu_to_le32(producer);
spin_unlock_irqrestore(&instance->completion_lock, flags); spin_unlock_irqrestore(&instance->completion_lock, flags);
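
The reply queue is a ring of __le32 contexts plus producer/consumer indices that the firmware updates in host memory; conversions happen once at the edges of the loop. A reduced sketch of the drain, with a hypothetical per-command completion callback:

        static void drain_reply_ring(__le32 *producer_p, __le32 *consumer_p,
                                     __le32 *ring, u32 depth,
                                     void (*complete_one)(u32 context))
        {
                u32 producer = le32_to_cpu(*producer_p);
                u32 consumer = le32_to_cpu(*consumer_p);

                while (consumer != producer) {
                        complete_one(le32_to_cpu(ring[consumer]));
                        if (++consumer == depth)
                                consumer = 0;
                }
                /* hand the updated index back in the firmware's byte order */
                *consumer_p = cpu_to_le32(producer);
        }
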
@ -1716,7 +1741,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
*instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
} }
instance->instancet->disable_intr(instance); instance->instancet->disable_intr(instance);
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
@ -2186,6 +2211,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
struct megasas_header *hdr = &cmd->frame->hdr; struct megasas_header *hdr = &cmd->frame->hdr;
unsigned long flags; unsigned long flags;
struct fusion_context *fusion = instance->ctrl_context; struct fusion_context *fusion = instance->ctrl_context;
u32 opcode;
/* flag for the retry reset */ /* flag for the retry reset */
cmd->retry_for_fw_reset = 0; cmd->retry_for_fw_reset = 0;
@ -2287,9 +2313,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP: case MFI_CMD_SMP:
case MFI_CMD_STP: case MFI_CMD_STP:
case MFI_CMD_DCMD: case MFI_CMD_DCMD:
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */ /* Check for LD map update */
if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
(cmd->frame->dcmd.mbox.b[1] == 1)) { && (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0; fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags); spin_lock_irqsave(instance->host->host_lock, flags);
if (cmd->frame->hdr.cmd_status != 0) { if (cmd->frame->hdr.cmd_status != 0) {
@ -2323,8 +2350,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags); flags);
break; break;
} }
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { opcode == MR_DCMD_CTRL_EVENT_GET) {
spin_lock_irqsave(&poll_aen_lock, flags); spin_lock_irqsave(&poll_aen_lock, flags);
megasas_poll_wait_aen = 0; megasas_poll_wait_aen = 0;
spin_unlock_irqrestore(&poll_aen_lock, flags); spin_unlock_irqrestore(&poll_aen_lock, flags);
@ -2333,7 +2360,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
/* /*
* See if got an event notification * See if got an event notification
*/ */
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
megasas_service_aen(instance, cmd); megasas_service_aen(instance, cmd);
else else
megasas_complete_int_cmd(instance, cmd); megasas_complete_int_cmd(instance, cmd);
@ -2606,7 +2633,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
PCI_DEVICE_ID_LSI_VERDE_ZCR)) { PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
*instance->consumer = *instance->consumer =
MEGASAS_ADPRESET_INPROG_SIGN; cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
} }
@ -2983,7 +3010,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
} }
memset(cmd->frame, 0, total_sz); memset(cmd->frame, 0, total_sz);
cmd->frame->io.context = cmd->index; cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0; cmd->frame->io.pad_0 = 0;
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
@ -3143,13 +3170,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF; dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1; dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0; dcmd->timeout = 0;
dcmd->pad_0 = 0; dcmd->pad_0 = 0;
dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
dcmd->opcode = MR_DCMD_PD_LIST_QUERY; dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
dcmd->sgl.sge32[0].phys_addr = ci_h; dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
if (!megasas_issue_polled(instance, cmd)) { if (!megasas_issue_polled(instance, cmd)) {
ret = 0; ret = 0;
@ -3164,16 +3191,16 @@ megasas_get_pd_list(struct megasas_instance *instance)
pd_addr = ci->addr; pd_addr = ci->addr;
if ( ret == 0 && if ( ret == 0 &&
(ci->count < (le32_to_cpu(ci->count) <
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
memset(instance->pd_list, 0, memset(instance->pd_list, 0,
MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
for (pd_index = 0; pd_index < ci->count; pd_index++) { for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
instance->pd_list[pd_addr->deviceId].tid = instance->pd_list[pd_addr->deviceId].tid =
pd_addr->deviceId; le16_to_cpu(pd_addr->deviceId);
instance->pd_list[pd_addr->deviceId].driveType = instance->pd_list[pd_addr->deviceId].driveType =
pd_addr->scsiDevType; pd_addr->scsiDevType;
instance->pd_list[pd_addr->deviceId].driveState = instance->pd_list[pd_addr->deviceId].driveState =
@ -3207,6 +3234,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
struct megasas_dcmd_frame *dcmd; struct megasas_dcmd_frame *dcmd;
struct MR_LD_LIST *ci; struct MR_LD_LIST *ci;
dma_addr_t ci_h = 0; dma_addr_t ci_h = 0;
u32 ld_count;
cmd = megasas_get_cmd(instance); cmd = megasas_get_cmd(instance);
@ -3233,12 +3261,12 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF; dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1; dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0; dcmd->timeout = 0;
dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->opcode = MR_DCMD_LD_GET_LIST; dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
dcmd->sgl.sge32[0].phys_addr = ci_h; dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0; dcmd->pad_0 = 0;
if (!megasas_issue_polled(instance, cmd)) { if (!megasas_issue_polled(instance, cmd)) {
@ -3247,12 +3275,14 @@ megasas_get_ld_list(struct megasas_instance *instance)
ret = -1; ret = -1;
} }
ld_count = le32_to_cpu(ci->ldCount);
/* the following function will get the instance PD LIST */ /* the following function will get the instance PD LIST */
if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) { if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { for (ld_index = 0; ld_index < ld_count; ld_index++) {
if (ci->ldList[ld_index].state != 0) { if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId; ids = ci->ldList[ld_index].ref.targetId;
instance->ld_ids[ids] = instance->ld_ids[ids] =
@ -3270,6 +3300,87 @@ megasas_get_ld_list(struct megasas_instance *instance)
return ret; return ret;
} }
/**
* megasas_ld_list_query - Returns FW's ld_list structure
* @instance: Adapter soft state
* @query_type: ld_list query type
*
* Issues an internal command (DCMD) to get the FW's LD list
* structure. This information is mainly used to find out which
* LDs the FW exposes to the host.
*/
static int
megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
{
int ret = 0, ld_index = 0, ids = 0;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct MR_LD_TARGETID_LIST *ci;
dma_addr_t ci_h = 0;
u32 tgtid_count;
cmd = megasas_get_cmd(instance);
if (!cmd) {
printk(KERN_WARNING
"megasas:(megasas_ld_list_query): Failed to get cmd\n");
return -ENOMEM;
}
dcmd = &cmd->frame->dcmd;
ci = pci_alloc_consistent(instance->pdev,
sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
if (!ci) {
printk(KERN_WARNING
"megasas: Failed to alloc mem for ld_list_query\n");
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->mbox.b[0] = query_type;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->pad_0 = 0;
if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
ret = 0;
} else {
/* On failure, call older LD list DCMD */
ret = 1;
}
tgtid_count = le32_to_cpu(ci->count);
if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
ids = ci->targetId[ld_index];
instance->ld_ids[ids] = ci->targetId[ld_index];
}
}
pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
ci, ci_h);
megasas_return_cmd(instance, cmd);
return ret;
}
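
A non-zero return deliberately means "query not supported" rather than a hard error, so callers can keep older firmware working. The calling pattern, as used later in this patch:

        if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
                megasas_get_ld_list(instance);  /* legacy DCMD fallback */
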
/** /**
* megasas_get_controller_info - Returns FW's controller structure * megasas_get_controller_info - Returns FW's controller structure
* @instance: Adapter soft state * @instance: Adapter soft state
@ -3313,13 +3424,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF; dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1; dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0; dcmd->timeout = 0;
dcmd->pad_0 = 0; dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->opcode = MR_DCMD_CTRL_GET_INFO; dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
dcmd->sgl.sge32[0].phys_addr = ci_h; dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
if (!megasas_issue_polled(instance, cmd)) { if (!megasas_issue_polled(instance, cmd)) {
ret = 0; ret = 0;
@ -3375,17 +3486,20 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
init_frame->context = context; init_frame->context = context;
initq_info->reply_queue_entries = instance->max_fw_cmds + 1; initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
initq_info->producer_index_phys_addr_lo = instance->producer_h; initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
initq_info->consumer_index_phys_addr_lo = instance->consumer_h; initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
init_frame->cmd = MFI_CMD_INIT; init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF; init_frame->cmd_status = 0xFF;
init_frame->queue_info_new_phys_addr_lo = initq_info_h; init_frame->queue_info_new_phys_addr_lo =
cpu_to_le32(lower_32_bits(initq_info_h));
init_frame->queue_info_new_phys_addr_hi =
cpu_to_le32(upper_32_bits(initq_info_h));
init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
/* /*
* disable the intr before firing the init frame to FW * disable the intr before firing the init frame to FW
@ -3648,7 +3762,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
megasas_get_pd_list(instance); megasas_get_pd_list(instance);
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
megasas_get_ld_list(instance); if (megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
megasas_get_ld_list(instance);
ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
@ -3665,8 +3781,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
ctrl_info->max_strips_per_io; le16_to_cpu(ctrl_info->max_strips_per_io);
max_sectors_2 = ctrl_info->max_request_size; max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
@ -3675,14 +3791,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->is_imr = 0; instance->is_imr = 0;
dev_info(&instance->pdev->dev, "Controller type: MR," dev_info(&instance->pdev->dev, "Controller type: MR,"
"Memory size is: %dMB\n", "Memory size is: %dMB\n",
ctrl_info->memory_size); le16_to_cpu(ctrl_info->memory_size));
} else { } else {
instance->is_imr = 1; instance->is_imr = 1;
dev_info(&instance->pdev->dev, dev_info(&instance->pdev->dev,
"Controller type: iMR\n"); "Controller type: iMR\n");
} }
/* OnOffProperties are converted into CPU arch*/
le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
instance->disableOnlineCtrlReset = instance->disableOnlineCtrlReset =
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
/* adapterOperations2 are converted into CPU arch*/
le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
instance->UnevenSpanSupport = instance->UnevenSpanSupport =
ctrl_info->adapterOperations2.supportUnevenSpans; ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) { if (instance->UnevenSpanSupport) {
@ -3696,7 +3816,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
} }
} }
instance->max_sectors_per_req = instance->max_num_sge * instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512; PAGE_SIZE / 512;
if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
@ -3802,20 +3921,24 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0; dcmd->cmd_status = 0x0;
dcmd->sge_count = 1; dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0; dcmd->timeout = 0;
dcmd->pad_0 = 0; dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
dcmd->sgl.sge32[0].phys_addr = el_info_h; dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
megasas_issue_blocked_cmd(instance, cmd); megasas_issue_blocked_cmd(instance, cmd);
/* /*
* Copy the data back into callers buffer * Copy the data back into callers buffer
*/ */
memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h); el_info, el_info_h);
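
The memcpy() had to go because source and destination now differ in byte order: the DMA buffer holds __le32 sequence numbers while the caller's struct is CPU-order. A sketch of the idea with hypothetical, trimmed-down types:

        struct fw_evt_log_info { __le32 newest_seq_num; __le32 oldest_seq_num; };
        struct host_evt_log_info { u32 newest_seq_num; u32 oldest_seq_num; };

        static void convert_evt_log_info(struct host_evt_log_info *dst,
                                         const struct fw_evt_log_info *src)
        {
                dst->newest_seq_num = le32_to_cpu(src->newest_seq_num);
                dst->oldest_seq_num = le32_to_cpu(src->oldest_seq_num);
                /* ...and so on for the remaining sequence-number fields */
        }
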
@ -3862,6 +3985,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
if (instance->aen_cmd) { if (instance->aen_cmd) {
prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
/* /*
* A class whose enum value is smaller is inclusive of all * A class whose enum value is smaller is inclusive of all
@ -3874,7 +3998,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
* values * values
*/ */
if ((prev_aen.members.class <= curr_aen.members.class) && if ((prev_aen.members.class <= curr_aen.members.class) &&
!((prev_aen.members.locale & curr_aen.members.locale) ^ !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^
curr_aen.members.locale)) { curr_aen.members.locale)) {
/* /*
* Previously issued event registration includes * Previously issued event registration includes
@ -3882,7 +4006,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
*/ */
return 0; return 0;
} else { } else {
curr_aen.members.locale |= prev_aen.members.locale; curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale);
if (prev_aen.members.class < curr_aen.members.class) if (prev_aen.members.class < curr_aen.members.class)
curr_aen.members.class = prev_aen.members.class; curr_aen.members.class = prev_aen.members.class;
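
Once the locale is in CPU order, the merge logic itself is byte-order free: a previous registration covers the new one when its class is at least as inclusive (lower enum value) and its locale bitmap is a superset. A plain-C restatement of that test:

        static int aen_already_covered(u8 prev_class, u16 prev_locale,
                                       u8 curr_class, u16 curr_locale)
        {
                return prev_class <= curr_class &&
                       !((prev_locale & curr_locale) ^ curr_locale);
        }
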
@ -3917,16 +4041,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0; dcmd->cmd_status = 0x0;
dcmd->sge_count = 1; dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0; dcmd->timeout = 0;
dcmd->pad_0 = 0; dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
dcmd->mbox.w[0] = cpu_to_le32(seq_num);
instance->last_seq_num = seq_num; instance->last_seq_num = seq_num;
dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
dcmd->mbox.w[0] = seq_num; dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
dcmd->mbox.w[1] = curr_aen.word;
dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
if (instance->aen_cmd != NULL) { if (instance->aen_cmd != NULL) {
megasas_return_cmd(instance, cmd); megasas_return_cmd(instance, cmd);
@ -3972,8 +4096,9 @@ static int megasas_start_aen(struct megasas_instance *instance)
class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG; class_locale.members.class = MR_EVT_CLASS_DEBUG;
return megasas_register_aen(instance, eli.newest_seq_num + 1, return megasas_register_aen(instance,
class_locale.word); le32_to_cpu(eli.newest_seq_num) + 1,
class_locale.word);
} }
/** /**
@ -4068,6 +4193,7 @@ megasas_set_dma_mask(struct pci_dev *pdev)
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
goto fail_set_dma_mask; goto fail_set_dma_mask;
} }
return 0; return 0;
fail_set_dma_mask: fail_set_dma_mask:
@ -4386,11 +4512,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0; dcmd->cmd_status = 0x0;
dcmd->sge_count = 0; dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
dcmd->timeout = 0; dcmd->timeout = 0;
dcmd->pad_0 = 0; dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0; dcmd->data_xfer_len = 0;
dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
megasas_issue_blocked_cmd(instance, cmd); megasas_issue_blocked_cmd(instance, cmd);
@ -4431,11 +4557,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0; dcmd->cmd_status = 0x0;
dcmd->sge_count = 0; dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
dcmd->timeout = 0; dcmd->timeout = 0;
dcmd->pad_0 = 0; dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0; dcmd->data_xfer_len = 0;
dcmd->opcode = opcode; dcmd->opcode = cpu_to_le32(opcode);
megasas_issue_blocked_cmd(instance, cmd); megasas_issue_blocked_cmd(instance, cmd);
@ -4850,10 +4976,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* alone separately * alone separately
*/ */
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cmd->index; cmd->frame->hdr.context = cpu_to_le32(cmd->index);
cmd->frame->hdr.pad_0 = 0; cmd->frame->hdr.pad_0 = 0;
cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
MFI_FRAME_SENSE64); MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
/* /*
* The management interface between applications and the fw uses * The management interface between applications and the fw uses
@ -4887,8 +5014,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* We don't change the dma_coherent_mask, so * We don't change the dma_coherent_mask, so
* pci_alloc_consistent only returns 32bit addresses * pci_alloc_consistent only returns 32bit addresses
*/ */
kern_sge32[i].phys_addr = (u32) buf_handle; kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
kern_sge32[i].length = ioc->sgl[i].iov_len; kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
/* /*
* We created a kernel buffer corresponding to the * We created a kernel buffer corresponding to the
@ -4911,7 +5038,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
sense_ptr = sense_ptr =
(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
*sense_ptr = sense_handle; *sense_ptr = cpu_to_le32(sense_handle);
} }
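
Clearing bits in the __le16 flags word needs no read-convert-write round-trip, because byte swapping distributes over bitwise AND; only the constant mask has to be converted. A sketch:

        static void clear_frame_flags(__le16 *flags, u16 to_clear)
        {
                *flags &= cpu_to_le16((u16)~to_clear);
        }
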
/* /*
@ -4971,9 +5098,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
for (i = 0; i < ioc->sge_count; i++) { for (i = 0; i < ioc->sge_count; i++) {
if (kbuff_arr[i]) if (kbuff_arr[i])
dma_free_coherent(&instance->pdev->dev, dma_free_coherent(&instance->pdev->dev,
kern_sge32[i].length, le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i], kbuff_arr[i],
kern_sge32[i].phys_addr); le32_to_cpu(kern_sge32[i].phys_addr));
} }
megasas_return_cmd(instance, cmd); megasas_return_cmd(instance, cmd);
@ -5327,7 +5454,7 @@ megasas_aen_polling(struct work_struct *work)
host = instance->host; host = instance->host;
if (instance->evt_detail) { if (instance->evt_detail) {
switch (instance->evt_detail->code) { switch (le32_to_cpu(instance->evt_detail->code)) {
case MR_EVT_PD_INSERTED: case MR_EVT_PD_INSERTED:
if (megasas_get_pd_list(instance) == 0) { if (megasas_get_pd_list(instance) == 0) {
for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@ -5389,7 +5516,9 @@ megasas_aen_polling(struct work_struct *work)
case MR_EVT_LD_OFFLINE: case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED: case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED: case MR_EVT_LD_DELETED:
megasas_get_ld_list(instance); if (megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0; for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL; j < MEGASAS_MAX_DEV_PER_CHANNEL;
@ -5399,7 +5528,7 @@ megasas_aen_polling(struct work_struct *work)
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host, sdev1 = scsi_device_lookup(host,
i + MEGASAS_MAX_LD_CHANNELS, MEGASAS_MAX_PD_CHANNELS + i,
j, j,
0); 0);
@ -5418,7 +5547,9 @@ megasas_aen_polling(struct work_struct *work)
doscan = 0; doscan = 0;
break; break;
case MR_EVT_LD_CREATED: case MR_EVT_LD_CREATED:
megasas_get_ld_list(instance); if (megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0; for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL; j < MEGASAS_MAX_DEV_PER_CHANNEL;
@ -5427,14 +5558,14 @@ megasas_aen_polling(struct work_struct *work)
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host, sdev1 = scsi_device_lookup(host,
i+MEGASAS_MAX_LD_CHANNELS, MEGASAS_MAX_PD_CHANNELS + i,
j, 0); j, 0);
if (instance->ld_ids[ld_index] != if (instance->ld_ids[ld_index] !=
0xff) { 0xff) {
if (!sdev1) { if (!sdev1) {
scsi_add_device(host, scsi_add_device(host,
i + 2, MEGASAS_MAX_PD_CHANNELS + i,
j, 0); j, 0);
} }
} }
@ -5483,18 +5614,20 @@ megasas_aen_polling(struct work_struct *work)
} }
} }
megasas_get_ld_list(instance); if (megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
ld_index = ld_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host, sdev1 = scsi_device_lookup(host,
i+MEGASAS_MAX_LD_CHANNELS, j, 0); MEGASAS_MAX_PD_CHANNELS + i, j, 0);
if (instance->ld_ids[ld_index] != 0xff) { if (instance->ld_ids[ld_index] != 0xff) {
if (!sdev1) { if (!sdev1) {
scsi_add_device(host, scsi_add_device(host,
i+2, MEGASAS_MAX_PD_CHANNELS + i,
j, 0); j, 0);
} else { } else {
scsi_device_put(sdev1); scsi_device_put(sdev1);
@ -5514,7 +5647,7 @@ megasas_aen_polling(struct work_struct *work)
return ; return ;
} }
seq_num = instance->evt_detail->seq_num + 1; seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
/* Register AEN with FW for latest sequence number plus 1 */ /* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0; class_locale.members.reserved = 0;

View file

@ -126,17 +126,17 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
} }
static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
{ {
return map->raidMap.arMapInfo[ar].pd[arm]; return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
} }
static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
{ {
return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
} }
static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
{ {
return map->raidMap.devHndlInfo[pd].curDevHdl; return map->raidMap.devHndlInfo[pd].curDevHdl;
} }
@ -148,7 +148,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
{ {
return map->raidMap.ldTgtIdToLd[ldTgtId]; return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
} }
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@ -167,18 +167,22 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
struct MR_LD_RAID *raid;
int ldCount, num_lds;
u16 ld;
if (pFwRaidMap->totalSize !=
if (le32_to_cpu(pFwRaidMap->totalSize) !=
(sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
(sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) { (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
(unsigned int)((sizeof(struct MR_FW_RAID_MAP) - (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
sizeof(struct MR_LD_SPAN_MAP)) + sizeof(struct MR_LD_SPAN_MAP)) +
(sizeof(struct MR_LD_SPAN_MAP) * (sizeof(struct MR_LD_SPAN_MAP) *
pFwRaidMap->ldCount))); le32_to_cpu(pFwRaidMap->ldCount))));
printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
pFwRaidMap->totalSize); le32_to_cpu(pFwRaidMap->totalSize));
return 0; return 0;
} }
@ -187,6 +191,15 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
mr_update_load_balance_params(map, lbInfo); mr_update_load_balance_params(map, lbInfo);
num_lds = le32_to_cpu(map->raidMap.ldCount);
/*Convert Raid capability values to CPU arch */
for (ldCount = 0; ldCount < num_lds; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, map);
raid = MR_LdRaidGet(ld, map);
le32_to_cpus((u32 *)&raid->capability);
}
return 1; return 1;
} }
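
le32_to_cpus() converts in place, which suits blocks such as raid->capability that are swapped once at map-validation time and then read freely afterwards. A minimal sketch with a hypothetical stand-in field:

        #include <asm/byteorder.h>

        struct ld_caps { u32 bits; };           /* stand-in for raid->capability */

        static void fixup_ld_caps(struct ld_caps *caps)
        {
                le32_to_cpus(&caps->bits);      /* one swap, then native reads */
        }
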
@ -200,23 +213,20 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
quad = &pSpanBlock->block_span_info.quad[j]; quad = &pSpanBlock->block_span_info.quad[j];
if (quad->diff == 0) if (le32_to_cpu(quad->diff) == 0)
return SPAN_INVALID; return SPAN_INVALID;
if (quad->logStart <= row && row <= quad->logEnd && if (le64_to_cpu(quad->logStart) <= row && row <=
(mega_mod64(row-quad->logStart, quad->diff)) == 0) { le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) { if (span_blk != NULL) {
u64 blk, debugBlk; u64 blk, debugBlk;
blk = blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
mega_div64_32(
(row-quad->logStart),
quad->diff);
debugBlk = blk; debugBlk = blk;
blk = (blk + quad->offsetInSpan) << blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
raid->stripeShift;
*span_blk = blk; *span_blk = blk;
} }
return span; return span;
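
The quad test and division above, restated in CPU order using the driver's own mega_mod64()/mega_div64_32() helpers (a sketch of the arithmetic, not the exact control flow):

        static int quad_row_to_block(u64 row, u64 log_start, u64 log_end,
                                     u32 diff, u64 offset_in_span,
                                     u8 stripe_shift, u64 *blk_out)
        {
                if (!diff || row < log_start || row > log_end ||
                    mega_mod64(row - log_start, diff))
                        return 0;       /* row is not described by this quad */
                *blk_out = (mega_div64_32(row - log_start, diff) +
                            offset_in_span) << stripe_shift;
                return 1;
        }
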
@ -257,8 +267,8 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
for (span = 0; span < raid->spanDepth; span++) for (span = 0; span < raid->spanDepth; span++)
dev_dbg(&instance->pdev->dev, "Span=%x," dev_dbg(&instance->pdev->dev, "Span=%x,"
" number of quads=%x\n", span, " number of quads=%x\n", span,
map->raidMap.ldSpanMap[ld].spanBlock[span]. le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements); block_span_info.noElements));
for (element = 0; element < MAX_QUAD_DEPTH; element++) { for (element = 0; element < MAX_QUAD_DEPTH; element++) {
span_set = &(ldSpanInfo[ld].span_set[element]); span_set = &(ldSpanInfo[ld].span_set[element]);
if (span_set->span_row_data_width == 0) if (span_set->span_row_data_width == 0)
@ -286,22 +296,22 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
(long unsigned int)span_set->data_strip_end); (long unsigned int)span_set->data_strip_end);
for (span = 0; span < raid->spanDepth; span++) { for (span = 0; span < raid->spanDepth; span++) {
if (map->raidMap.ldSpanMap[ld].spanBlock[span]. if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements >= block_span_info.noElements) >=
element + 1) { element + 1) {
quad = &map->raidMap.ldSpanMap[ld]. quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].block_span_info. spanBlock[span].block_span_info.
quad[element]; quad[element];
dev_dbg(&instance->pdev->dev, "Span=%x," dev_dbg(&instance->pdev->dev, "Span=%x,"
"Quad=%x, diff=%x\n", span, "Quad=%x, diff=%x\n", span,
element, quad->diff); element, le32_to_cpu(quad->diff));
dev_dbg(&instance->pdev->dev, dev_dbg(&instance->pdev->dev,
"offset_in_span=0x%08lx\n", "offset_in_span=0x%08lx\n",
(long unsigned int)quad->offsetInSpan); (long unsigned int)le64_to_cpu(quad->offsetInSpan));
dev_dbg(&instance->pdev->dev, dev_dbg(&instance->pdev->dev,
"logical start=0x%08lx, end=0x%08lx\n", "logical start=0x%08lx, end=0x%08lx\n",
(long unsigned int)quad->logStart, (long unsigned int)le64_to_cpu(quad->logStart),
(long unsigned int)quad->logEnd); (long unsigned int)le64_to_cpu(quad->logEnd));
} }
} }
} }
@ -348,23 +358,23 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
continue; continue;
for (span = 0; span < raid->spanDepth; span++) for (span = 0; span < raid->spanDepth; span++)
if (map->raidMap.ldSpanMap[ld].spanBlock[span]. if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements >= info+1) { block_span_info.noElements) >= info+1) {
quad = &map->raidMap.ldSpanMap[ld]. quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span]. spanBlock[span].
block_span_info.quad[info]; block_span_info.quad[info];
if (quad->diff == 0) if (le32_to_cpu(quad->diff) == 0)
return SPAN_INVALID; return SPAN_INVALID;
if (quad->logStart <= row && if (le64_to_cpu(quad->logStart) <= row &&
row <= quad->logEnd && row <= le64_to_cpu(quad->logEnd) &&
(mega_mod64(row - quad->logStart, (mega_mod64(row - le64_to_cpu(quad->logStart),
quad->diff)) == 0) { le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) { if (span_blk != NULL) {
u64 blk; u64 blk;
blk = mega_div64_32 blk = mega_div64_32
((row - quad->logStart), ((row - le64_to_cpu(quad->logStart)),
quad->diff); le32_to_cpu(quad->diff));
blk = (blk + quad->offsetInSpan) blk = (blk + le64_to_cpu(quad->offsetInSpan))
<< raid->stripeShift; << raid->stripeShift;
*span_blk = blk; *span_blk = blk;
} }
@ -415,8 +425,8 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
span_set_Row = mega_div64_32(span_set_Strip, span_set_Row = mega_div64_32(span_set_Strip,
span_set->span_row_data_width) * span_set->diff; span_set->span_row_data_width) * span_set->diff;
for (span = 0, span_offset = 0; span < raid->spanDepth; span++) for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
if (map->raidMap.ldSpanMap[ld].spanBlock[span]. if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements >= info+1) { block_span_info.noElements) >= info+1) {
if (strip_offset >= if (strip_offset >=
span_set->strip_offset[span]) span_set->strip_offset[span])
span_offset++; span_offset++;
@ -480,18 +490,18 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
continue; continue;
for (span = 0; span < raid->spanDepth; span++) for (span = 0; span < raid->spanDepth; span++)
if (map->raidMap.ldSpanMap[ld].spanBlock[span]. if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements >= info+1) { block_span_info.noElements) >= info+1) {
quad = &map->raidMap.ldSpanMap[ld]. quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].block_span_info.quad[info]; spanBlock[span].block_span_info.quad[info];
if (quad->logStart <= row && if (le64_to_cpu(quad->logStart) <= row &&
row <= quad->logEnd && row <= le64_to_cpu(quad->logEnd) &&
mega_mod64((row - quad->logStart), mega_mod64((row - le64_to_cpu(quad->logStart)),
quad->diff) == 0) { le32_to_cpu(quad->diff)) == 0) {
strip = mega_div64_32 strip = mega_div64_32
(((row - span_set->data_row_start) (((row - span_set->data_row_start)
- quad->logStart), - le64_to_cpu(quad->logStart)),
quad->diff); le32_to_cpu(quad->diff));
strip *= span_set->span_row_data_width; strip *= span_set->span_row_data_width;
strip += span_set->data_strip_start; strip += span_set->data_strip_start;
strip += span_set->strip_offset[span]; strip += span_set->strip_offset[span];
@ -543,8 +553,8 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
span_set->span_row_data_width); span_set->span_row_data_width);
for (span = 0, span_offset = 0; span < raid->spanDepth; span++) for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
if (map->raidMap.ldSpanMap[ld].spanBlock[span]. if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements >= info+1) { block_span_info.noElements) >= info+1) {
if (strip_offset >= if (strip_offset >=
span_set->strip_offset[span]) span_set->strip_offset[span])
span_offset = span_offset =
@ -669,7 +679,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
} }
} }
*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm; physArm;
return retval; return retval;
@ -765,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
} }
} }
*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm; physArm;
return retval; return retval;
@ -784,7 +794,7 @@ u8
MR_BuildRaidContext(struct megasas_instance *instance, MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context, struct RAID_CONTEXT *pRAID_Context,
struct MR_FW_RAID_MAP_ALL *map) struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
{ {
struct MR_LD_RAID *raid; struct MR_LD_RAID *raid;
u32 ld, stripSize, stripe_mask; u32 ld, stripSize, stripe_mask;
@ -965,7 +975,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
regSize += stripSize; regSize += stripSize;
} }
pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
pRAID_Context->regLockFlags = (isRead) ? pRAID_Context->regLockFlags = (isRead) ?
@ -974,9 +984,12 @@ MR_BuildRaidContext(struct megasas_instance *instance,
pRAID_Context->regLockFlags = (isRead) ? pRAID_Context->regLockFlags = (isRead) ?
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
pRAID_Context->VirtualDiskTgtId = raid->targetId; pRAID_Context->VirtualDiskTgtId = raid->targetId;
pRAID_Context->regLockRowLBA = regStart; pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
pRAID_Context->regLockLength = regSize; pRAID_Context->regLockLength = cpu_to_le32(regSize);
pRAID_Context->configSeqNum = raid->seqNum; pRAID_Context->configSeqNum = raid->seqNum;
/* save pointer to raid->LUN array */
*raidLUN = raid->LUN;
/*Get Phy Params only if FP capable, or else leave it to MR firmware /*Get Phy Params only if FP capable, or else leave it to MR firmware
to do the calculation.*/ to do the calculation.*/
@ -1047,8 +1060,8 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
raid = MR_LdRaidGet(ld, map); raid = MR_LdRaidGet(ld, map);
for (element = 0; element < MAX_QUAD_DEPTH; element++) { for (element = 0; element < MAX_QUAD_DEPTH; element++) {
for (span = 0; span < raid->spanDepth; span++) { for (span = 0; span < raid->spanDepth; span++) {
if (map->raidMap.ldSpanMap[ld].spanBlock[span]. if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements < block_span_info.noElements) <
element + 1) element + 1)
continue; continue;
span_set = &(ldSpanInfo[ld].span_set[element]); span_set = &(ldSpanInfo[ld].span_set[element]);
@ -1056,14 +1069,14 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
spanBlock[span].block_span_info. spanBlock[span].block_span_info.
quad[element]; quad[element];
span_set->diff = quad->diff; span_set->diff = le32_to_cpu(quad->diff);
for (count = 0, span_row_width = 0; for (count = 0, span_row_width = 0;
count < raid->spanDepth; count++) { count < raid->spanDepth; count++) {
if (map->raidMap.ldSpanMap[ld]. if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
spanBlock[count]. spanBlock[count].
block_span_info. block_span_info.
noElements >= element + 1) { noElements) >= element + 1) {
span_set->strip_offset[count] = span_set->strip_offset[count] =
span_row_width; span_row_width;
span_row_width += span_row_width +=
@ -1077,9 +1090,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
} }
span_set->span_row_data_width = span_row_width; span_set->span_row_data_width = span_row_width;
span_row = mega_div64_32(((quad->logEnd - span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
quad->logStart) + quad->diff), le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
quad->diff); le32_to_cpu(quad->diff));
if (element == 0) { if (element == 0) {
span_set->log_start_lba = 0; span_set->log_start_lba = 0;
@ -1096,7 +1109,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
span_set->data_row_start = 0; span_set->data_row_start = 0;
span_set->data_row_end = span_set->data_row_end =
(span_row * quad->diff) - 1; (span_row * le32_to_cpu(quad->diff)) - 1;
} else { } else {
span_set_prev = &(ldSpanInfo[ld]. span_set_prev = &(ldSpanInfo[ld].
span_set[element - 1]); span_set[element - 1]);
@ -1122,7 +1135,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
span_set_prev->data_row_end + 1; span_set_prev->data_row_end + 1;
span_set->data_row_end = span_set->data_row_end =
span_set->data_row_start + span_set->data_row_start +
(span_row * quad->diff) - 1; (span_row * le32_to_cpu(quad->diff)) - 1;
} }
break; break;
} }

View file

@@ -72,17 +72,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
 int
 megasas_issue_polled(struct megasas_instance *instance,
 		     struct megasas_cmd *cmd);
-u8
-MR_BuildRaidContext(struct megasas_instance *instance,
-		    struct IO_REQUEST_INFO *io_info,
-		    struct RAID_CONTEXT *pRAID_Context,
-		    struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
 void
 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
@@ -626,23 +615,20 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
 	IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
-	IOCInitMessage->MsgVersion = MPI2_VERSION;
-	IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION;
-	IOCInitMessage->SystemRequestFrameSize =
-		MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
-	IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth;
-	IOCInitMessage->ReplyDescriptorPostQueueAddress =
-		fusion->reply_frames_desc_phys;
-	IOCInitMessage->SystemRequestFrameBaseAddress =
-		fusion->io_request_frames_phys;
+	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+	IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
 	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
 	init_frame = (struct megasas_init_frame *)cmd->frame;
 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
 	frame_hdr = &cmd->frame->hdr;
 	frame_hdr->cmd_status = 0xFF;
-	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 	init_frame->cmd = MFI_CMD_INIT;
 	init_frame->cmd_status = 0xFF;
@@ -652,17 +638,24 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		init_frame->driver_operations.
 			mfi_capabilities.support_additional_msix = 1;
+	/* driver supports HA / Remote LUN over Fast Path interface */
+	init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+		= 1;
+	/* Convert capability to LE32 */
+	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
-	init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
-	init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
+	init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
 	req_desc =
 	  (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
-	req_desc->Words = cmd->frame_phys_addr;
+	req_desc->Words = 0;
 	req_desc->MFAIo.RequestFlags =
 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
 		 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	cpu_to_le32s((u32 *)&req_desc->MFAIo);
+	req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
 	/*
 	 * disable the intr before firing the init frame
@@ -753,13 +746,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = size_map_info;
-	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = size_map_info;
+	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
 	if (!megasas_issue_polled(instance, cmd))
 		ret = 0;
@@ -828,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
 	map = fusion->ld_map[instance->map_id & 1];
-	num_lds = map->raidMap.ldCount;
+	num_lds = le32_to_cpu(map->raidMap.ldCount);
 	dcmd = &cmd->frame->dcmd;
@@ -856,15 +849,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_WRITE;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = size_map_info;
+	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
 	dcmd->mbox.b[0] = num_lds;
 	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
-	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = size_map_info;
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
 	instance->map_update_cmd = cmd;
@@ -1067,9 +1060,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
 	spin_lock_irqsave(&instance->hba_lock, flags);
-	writel(req_desc_lo,
-	       &(regs)->inbound_low_queue_port);
-	writel(req_desc_hi, &(regs)->inbound_high_queue_port);
+	writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+	writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 }
@@ -1157,8 +1149,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 		return sge_count;
 	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-		sgl_ptr->Length = sg_dma_len(os_sgl);
-		sgl_ptr->Address = sg_dma_address(os_sgl);
+		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
 		sgl_ptr->Flags = 0;
 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 		    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
@@ -1177,9 +1169,9 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 			     PCI_DEVICE_ID_LSI_INVADER) ||
 			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_FURY)) {
-				if ((cmd->io_request->IoFlags &
+				if ((le16_to_cpu(cmd->io_request->IoFlags) &
 				     MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
 				     MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
 					cmd->io_request->ChainOffset =
 						fusion->
 						chain_offset_io_request;
@@ -1201,9 +1193,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 			sg_chain->Flags =
 				(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 				 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
-			sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION)
-				*(sge_count - sg_processed));
-			sg_chain->Address = cmd->sg_frame_phys_addr;
+			sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
 			sgl_ptr =
 				(struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
@@ -1261,7 +1252,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 		io_request->CDB.EEDP32.PrimaryReferenceTag =
 			cpu_to_be32(ref_tag);
 		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
-		io_request->IoFlags = 32; /* Specify 32-byte cdb */
+		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
 		/* Transfer length */
 		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
@@ -1271,19 +1262,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 		/* set SCSI IO EEDPFlags */
 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
-			io_request->EEDPFlags =
+			io_request->EEDPFlags = cpu_to_le16(
 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
 				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
-				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
 		} else {
-			io_request->EEDPFlags =
+			io_request->EEDPFlags = cpu_to_le16(
 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
 		}
-		io_request->Control |= (0x4 << 26);
-		io_request->EEDPBlockSize = scp->device->sector_size;
+		io_request->Control |= cpu_to_le32((0x4 << 26));
+		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
 	} else {
 		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
 		if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1311,7 +1302,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 			cdb[8] = (u8)(num_blocks & 0xff);
 			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
-			io_request->IoFlags = 10; /* Specify 10-byte cdb */
+			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
 			cdb_len = 10;
 		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
 			/* Convert to 16 byte CDB for large LBA's */
@@ -1349,7 +1340,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
 			cdb[10] = (u8)((num_blocks >> 24) & 0xff);
-			io_request->IoFlags = 16; /* Specify 16-byte cdb */
+			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
 			cdb_len = 16;
 		}
@@ -1410,13 +1401,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	struct IO_REQUEST_INFO io_info;
 	struct fusion_context *fusion;
 	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+	u8 *raidLUN;
 	device_id = MEGASAS_DEV_INDEX(instance, scp);
 	fusion = instance->ctrl_context;
 	io_request = cmd->io_request;
-	io_request->RaidContext.VirtualDiskTgtId = device_id;
+	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
 	io_request->RaidContext.status = 0;
 	io_request->RaidContext.exStatus = 0;
@@ -1480,7 +1472,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
 	io_info.numBlocks = datalength;
 	io_info.ldTgtId = device_id;
-	io_request->DataLength = scsi_bufflen(scp);
+	io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
 	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
 		io_info.isRead = 1;
@@ -1494,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 	} else {
 		if (MR_BuildRaidContext(instance, &io_info,
 					&io_request->RaidContext,
-					local_map_ptr))
+					local_map_ptr, &raidLUN))
 			fp_possible = io_info.fpOkForIo;
 	}
@@ -1520,8 +1512,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
 			io_request->RaidContext.nseg = 0x1;
-			io_request->IoFlags |=
-				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
 			io_request->RaidContext.regLockFlags |=
 				(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
 				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
@@ -1537,9 +1528,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 		scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
 		io_request->DevHandle = io_info.devHandle;
+		/* populate the LUN field */
+		memcpy(io_request->LUN, raidLUN, 8);
 	} else {
 		io_request->RaidContext.timeoutValue =
-			local_map_ptr->raidMap.fpPdIoTimeoutSec;
+			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1557,7 +1550,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 			io_request->RaidContext.nseg = 0x1;
 		}
 		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-		io_request->DevHandle = device_id;
+		io_request->DevHandle = cpu_to_le16(device_id);
 	} /* Not FP */
 }
@@ -1579,6 +1572,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
 	u16 pd_index = 0;
 	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
 	struct fusion_context *fusion = instance->ctrl_context;
+	u8 span, physArm;
+	u16 devHandle;
+	u32 ld, arRef, pd;
+	struct MR_LD_RAID *raid;
+	struct RAID_CONTEXT *pRAID_Context;
 	io_request = cmd->io_request;
 	device_id = MEGASAS_DEV_INDEX(instance, scmd);
@@ -1586,6 +1584,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
 		+scmd->device->id;
 	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
 	/* Check if this is a system PD I/O */
 	if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
 	    instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
@@ -1623,15 +1624,62 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
 				scmd->request->timeout / HZ;
 		}
 	} else {
+		if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+			goto NonFastPath;
+
+		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+		if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+			goto NonFastPath;
+
+		raid = MR_LdRaidGet(ld, local_map_ptr);
+
+		/* check if this LD is FP capable */
+		if (!(raid->capability.fpNonRWCapable))
+			/* not FP capable, send as non-FP */
+			goto NonFastPath;
+
+		/* get RAID_Context pointer */
+		pRAID_Context = &io_request->RaidContext;
+
+		/* set RAID context values */
+		pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+		pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd;
+		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+		pRAID_Context->regLockRowLBA = 0;
+		pRAID_Context->regLockLength = 0;
+		pRAID_Context->configSeqNum = raid->seqNum;
+
+		/* get the DevHandle for the PD (since this is
+		   fpNonRWCapable, this is a single disk RAID0) */
+		span = physArm = 0;
+		arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+		pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+		devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+		/* build request descriptor */
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+		/* populate the LUN field */
+		memcpy(io_request->LUN, raid->LUN, 8);
+
+		/* build the raidScsiIO structure */
+		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+		io_request->DevHandle = devHandle;
+
+		return;
+
+NonFastPath:
 		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-		io_request->DevHandle = device_id;
+		io_request->DevHandle = cpu_to_le16(device_id);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 	}
-	io_request->RaidContext.VirtualDiskTgtId = device_id;
+	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
 	io_request->LUN[1] = scmd->device->lun;
-	io_request->DataLength = scsi_bufflen(scmd);
 }
 /**
@@ -1670,7 +1718,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
 	 * Just the CDB length,rest of the Flags are zero
 	 * This will be modified for FP in build_ldio_fusion
 	 */
-	io_request->IoFlags = scp->cmd_len;
+	io_request->IoFlags = cpu_to_le16(scp->cmd_len);
 	if (megasas_is_ldio(scp))
 		megasas_build_ldio_fusion(instance, scp, cmd);
@@ -1695,17 +1743,17 @@ megasas_build_io_fusion(struct megasas_instance *instance,
 	io_request->RaidContext.numSGE = sge_count;
-	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
-		io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
-		io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
 	io_request->SGLOffset0 =
 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
-	io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+	io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
 	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
 	cmd->scmd = scp;
@@ -1770,7 +1818,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
 	}
 	req_desc = cmd->request_desc;
-	req_desc->SCSIIO.SMID = index;
+	req_desc->SCSIIO.SMID = cpu_to_le16(index);
 	if (cmd->io_request->ChainOffset != 0 &&
 	    cmd->io_request->ChainOffset != 0xF)
@@ -1832,7 +1880,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 	num_completed = 0;
 	while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
-		smid = reply_desc->SMID;
+		smid = le16_to_cpu(reply_desc->SMID);
 		cmd_fusion = fusion->cmd_list[smid - 1];
@@ -2050,12 +2098,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
 			SGL) / 4;
 	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
-	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
-	mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;
+	mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
 	return 0;
 }
@@ -2088,7 +2136,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
 					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-	req_desc->SCSIIO.SMID = index;
+	req_desc->SCSIIO.SMID = cpu_to_le16(index);
 	return req_desc;
 }
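
Note the discipline these conversions share: byte-wide fields (cmd, cmd_status, mbox.b[]) are stored as-is, anything wider that the firmware parses is written through cpu_to_leXX(), and values read back from shared memory (reply_desc->SMID, raidMap.ldCount) go through leXX_to_cpu(). A condensed sketch of the write side, using placeholder constants rather than the driver's real values:

/* Sketch: LE framing as in the dcmd hunks above. Constants are placeholders. */
#define X_CMD_DCMD		0x05
#define X_FRAME_DIR_READ	0x0010
#define X_DCMD_LD_MAP_GET_INFO	0x01020304

struct dcmd_like {			/* hypothetical, simplified frame */
	u8     cmd;			/* single byte: never swapped */
	u8     cmd_status;
	__le16 flags;			/* wider fields are LE on the wire */
	__le32 opcode;
	__le32 data_xfer_len;
};

static void fill_dcmd(struct dcmd_like *dcmd, u32 len)
{
	dcmd->cmd = X_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->flags = cpu_to_le16(X_FRAME_DIR_READ);
	dcmd->opcode = cpu_to_le32(X_DCMD_LD_MAP_GET_INFO);
	dcmd->data_xfer_len = cpu_to_le32(len);
}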


@@ -93,8 +93,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
 */
 struct RAID_CONTEXT {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u8	nseg:4;
+	u8	Type:4;
+#else
 	u8	Type:4;
 	u8	nseg:4;
+#endif
 	u8	resvd0;
 	u16	timeoutValue;
 	u8	regLockFlags;
@@ -298,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
 * MPT RAID MFA IO Descriptor.
 */
 struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u32	MessageAddress1:24; /* bits 31:8*/
+	u32	RequestFlags:8;
+#else
 	u32	RequestFlags:8;
 	u32	MessageAddress1:24; /* bits 31:8*/
+#endif
 	u32	MessageAddress2;    /* bits 61:32 */
 };
@@ -518,6 +528,19 @@ struct MR_SPAN_BLOCK_INFO {
 struct MR_LD_RAID {
 	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u32	reserved4:7;
+		u32	fpNonRWCapable:1;
+		u32	fpReadAcrossStripe:1;
+		u32	fpWriteAcrossStripe:1;
+		u32	fpReadCapable:1;
+		u32	fpWriteCapable:1;
+		u32	encryptionType:8;
+		u32	pdPiMode:4;
+		u32	ldPiMode:4;
+		u32	reserved5:3;
+		u32	fpCapable:1;
+#else
 		u32	fpCapable:1;
 		u32	reserved5:3;
 		u32	ldPiMode:4;
@@ -527,7 +550,9 @@ struct MR_LD_RAID {
 		u32	fpReadCapable:1;
 		u32	fpWriteAcrossStripe:1;
 		u32	fpReadAcrossStripe:1;
-		u32	reserved4:8;
+		u32	fpNonRWCapable:1;
+		u32	reserved4:7;
+#endif
 	} capability;
 	u32	reserved6;
 	u64	size;
@@ -551,7 +576,9 @@ struct MR_LD_RAID {
 		u32 reserved:31;
 	} flags;
-	u8	reserved3[0x5C];
+	u8	LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+	u8	fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+	u8	reserved3[0x80-0x2D]; /* 0x2D */
 };
 struct MR_LD_SPAN_MAP {
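
The mirrored #if defined(__BIG_ENDIAN_BITFIELD) blocks are needed because C leaves bitfield allocation order to the ABI: on Linux little-endian targets the first-declared field takes the least significant bits, on big-endian targets the most significant. Reversing the declaration order under __BIG_ENDIAN_BITFIELD keeps the in-memory layout the firmware expects on both kinds of host. A toy illustration (sketch, not driver code):

/*
 * The firmware defines one byte where bits 3:0 are Type and bits 7:4 are
 * nseg. With the mirrored declarations, a raw byte 0x21 decodes as
 * Type == 1, nseg == 2 regardless of host endianness.
 */
struct raid_ctx_byte {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 nseg:4;	/* first-declared field lands in the high bits */
	u8 Type:4;
#else
	u8 Type:4;	/* first-declared field lands in the low bits */
	u8 nseg:4;
#endif
};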


@@ -1,5 +1,5 @@
 # mpt3sas makefile
-obj-m += mpt3sas.o
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
 mpt3sas-y +=  mpt3sas_base.o	\
 		mpt3sas_config.o \
 		mpt3sas_scsih.o	\


@@ -2420,14 +2420,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 			}
 		}
-		if (modepage == 0x3F) {
-			sd_printk(KERN_ERR, sdkp, "No Caching mode page "
-				  "present\n");
-			goto defaults;
-		} else if ((buffer[offset] & 0x3f) != modepage) {
-			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
-			goto defaults;
-		}
+		sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+		goto defaults;
 	Page_found:
 		if (modepage == 8) {
 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);


@@ -177,6 +177,7 @@ enum {
 	MASK_TASK_RESPONSE		= 0xFF00,
 	MASK_RSP_UPIU_RESULT		= 0xFFFF,
 	MASK_QUERY_DATA_SEG_LEN		= 0xFFFF,
+	MASK_RSP_UPIU_DATA_SEG_LEN	= 0xFFFF,
 	MASK_RSP_EXCEPTION_EVENT	= 0x10000,
 };


@@ -36,9 +36,11 @@
 #include <linux/async.h>
 #include "ufshcd.h"
+#include "unipro.h"
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
+				 UIC_POWER_MODE |\
 				 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
 #define UIC_CMD_TIMEOUT	500
@@ -56,6 +58,9 @@
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO	0x02
 enum {
 	UFSHCD_MAX_CHANNEL	= 0,
 	UFSHCD_MAX_ID		= 1,
@@ -78,12 +83,6 @@ enum {
 	UFSHCD_INT_CLEAR,
 };
-/* Interrupt aggregation options */
-enum {
-	INT_AGGR_RESET,
-	INT_AGGR_CONFIG,
-};
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -237,6 +236,18 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 	       MASK_UIC_COMMAND_RESULT;
 }
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets UIC command argument3
+ * Returns the value of UIC command argument3
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
 /**
  * ufshcd_get_req_rsp - returns the TR response transaction type
  * @ucd_rsp_ptr: pointer to response UPIU
@@ -260,6 +271,20 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 }
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ *				from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+		MASK_RSP_UPIU_DATA_SEG_LEN;
+}
 /**
  * ufshcd_is_exception_event - Check if the device raised an exception event
  * @ucd_rsp_ptr: pointer to response UPIU
@@ -276,30 +301,30 @@ static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 }
 /**
- * ufshcd_config_int_aggr - Configure interrupt aggregation values.
- * Currently there is no use case where we want to configure
- * interrupt aggregation dynamically. So to configure interrupt
- * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
- * INT_AGGR_TIMEOUT_VALUE are used.
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
  * @hba: per adapter instance
- * @option: Interrupt aggregation option
  */
 static inline void
-ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 {
-	switch (option) {
-	case INT_AGGR_RESET:
-		ufshcd_writel(hba, INT_AGGR_ENABLE |
-			      INT_AGGR_COUNTER_AND_TIMER_RESET,
-			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-		break;
-	case INT_AGGR_CONFIG:
-		ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
-			      INT_AGGR_COUNTER_THRESHOLD_VALUE |
-			      INT_AGGR_TIMEOUT_VALUE,
-			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-		break;
-	}
+	ufshcd_writel(hba, INT_AGGR_ENABLE |
+		      INT_AGGR_COUNTER_AND_TIMER_RESET,
+		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
+		      INT_AGGR_TIMEOUT_VAL(tmout),
+		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 }
 /**
@@ -355,7 +380,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 {
 	int len;
-	if (lrbp->sense_buffer) {
+	if (lrbp->sense_buffer &&
+	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
 		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
 		memcpy(lrbp->sense_buffer,
 		       lrbp->ucd_rsp_ptr->sr.sense_data,
@@ -445,6 +471,18 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 		return false;
 }
+/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of HCS register
+ * Returns value of UPMCRS field
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
 /**
  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
  * @hba: per adapter instance
@@ -1361,6 +1399,202 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 	return ret;
 }
+/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+			u8 attr_set, u32 mib_val, u8 peer)
+{
+	struct uic_command uic_cmd = {0};
+	static const char *const action[] = {
+		"dme-set",
+		"dme-peer-set"
+	};
+	const char *set = action[!!peer];
+	int ret;
+
+	uic_cmd.command = peer ?
+		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+	uic_cmd.argument1 = attr_sel;
+	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+	uic_cmd.argument3 = mib_val;
+
+	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+			u32 *mib_val, u8 peer)
+{
+	struct uic_command uic_cmd = {0};
+	static const char *const action[] = {
+		"dme-get",
+		"dme-peer-get"
+	};
+	const char *get = action[!!peer];
+	int ret;
+
+	uic_cmd.command = peer ?
+		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+	uic_cmd.argument1 = attr_sel;
+
+	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret) {
+		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
+			get, UIC_GET_ATTR_ID(attr_sel), ret);
+		goto out;
+	}
+
+	if (mib_val)
+		*mib_val = uic_cmd.argument3;
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ *				using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+	struct uic_command uic_cmd = {0};
+	struct completion pwr_done;
+	unsigned long flags;
+	u8 status;
+	int ret;
+
+	uic_cmd.command = UIC_CMD_DME_SET;
+	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+	uic_cmd.argument3 = mode;
+	init_completion(&pwr_done);
+
+	mutex_lock(&hba->uic_cmd_mutex);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->pwr_done = &pwr_done;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr mode change with mode 0x%x uic error %d\n",
+			mode, ret);
+		goto out;
+	}
+
+	if (!wait_for_completion_timeout(hba->pwr_done,
+					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+		dev_err(hba->dev,
+			"pwr mode change with mode 0x%x completion timeout\n",
+			mode);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	status = ufshcd_get_upmcrs(hba);
+	if (status != PWR_LOCAL) {
+		dev_err(hba->dev,
+			"pwr mode change failed, host upmcrs:0x%x\n",
+			status);
+		ret = (status != PWR_OK) ? status : -1;
+	}
+out:
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->pwr_done = NULL;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	mutex_unlock(&hba->uic_cmd_mutex);
+	return ret;
+}
+
+/**
+ * ufshcd_config_max_pwr_mode - Set & Change power mode with
+ *	maximum capability attribute information.
+ * @hba: per adapter instance
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+{
+	enum {RX = 0, TX = 1};
+	u32 lanes[] = {1, 1};
+	u32 gear[] = {1, 1};
+	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+	int ret;
+
+	/* Get the connected lane count */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+
+	/*
+	 * First, get the maximum gears of HS speed.
+	 * If a zero value, it means there is no HSGEAR capability.
+	 * Then, get the maximum gears of PWM speed.
+	 */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
+	if (!gear[RX]) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
+		pwr[RX] = SLOWAUTO_MODE;
+	}
+
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
+	if (!gear[TX]) {
+		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				    &gear[TX]);
+		pwr[TX] = SLOWAUTO_MODE;
+	}
+
+	/*
+	 * Configure attributes for power mode change with below.
+	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+	 * - PA_HSSERIES
+	 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
+	if (pwr[RX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
+	if (pwr[TX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+
+	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+
+	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
+	if (ret)
+		dev_err(hba->dev,
+			"pwr_mode: power mode change failed %d\n", ret);
+
+	return ret;
+}
 /**
  * ufshcd_complete_dev_init() - checks device readiness
  * hba: per-adapter instance
@@ -1442,7 +1676,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 	/* Configure interrupt aggregation */
-	ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 	/* Configure UTRL and UTMRL base address registers */
 	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -1788,32 +2022,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
 	int result = 0;
 	switch (scsi_status) {
+	case SAM_STAT_CHECK_CONDITION:
+		ufshcd_copy_sense_data(lrbp);
 	case SAM_STAT_GOOD:
 		result |= DID_OK << 16 |
 			  COMMAND_COMPLETE << 8 |
-			  SAM_STAT_GOOD;
-		break;
-	case SAM_STAT_CHECK_CONDITION:
-		result |= DID_OK << 16 |
-			  COMMAND_COMPLETE << 8 |
-			  SAM_STAT_CHECK_CONDITION;
-		ufshcd_copy_sense_data(lrbp);
-		break;
-	case SAM_STAT_BUSY:
-		result |= SAM_STAT_BUSY;
+			  scsi_status;
 		break;
 	case SAM_STAT_TASK_SET_FULL:
 		/*
 		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
 		 * depth needs to be adjusted to the exact number of
 		 * outstanding commands the LUN can handle at any given time.
 		 */
 		ufshcd_adjust_lun_qdepth(lrbp->cmd);
-		result |= SAM_STAT_TASK_SET_FULL;
-		break;
+	case SAM_STAT_BUSY:
 	case SAM_STAT_TASK_ABORTED:
-		result |= SAM_STAT_TASK_ABORTED;
+		ufshcd_copy_sense_data(lrbp);
+		result |= scsi_status;
 		break;
 	default:
 		result |= DID_ERROR << 16;
@@ -1898,14 +2124,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 /**
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
-	if (hba->active_uic_cmd) {
+	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
 		hba->active_uic_cmd->argument2 |=
 			ufshcd_get_uic_cmd_result(hba);
+		hba->active_uic_cmd->argument3 =
+			ufshcd_get_dme_attr_val(hba);
 		complete(&hba->active_uic_cmd->done);
 	}
+
+	if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
+		complete(hba->pwr_done);
 }
 /**
@@ -1960,7 +2192,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	/* Reset interrupt aggregation counters */
 	if (int_aggr_reset)
-		ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+		ufshcd_reset_intr_aggr(hba);
 }
 /**
@@ -2251,8 +2483,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 	if (hba->errors)
 		ufshcd_err_handler(hba);
-	if (intr_status & UIC_COMMAND_COMPL)
-		ufshcd_uic_cmd_compl(hba);
+	if (intr_status & UFSHCD_UIC_MASK)
+		ufshcd_uic_cmd_compl(hba, intr_status);
 	if (intr_status & UTP_TASK_REQ_COMPL)
 		ufshcd_tmc_handler(hba);
@@ -2494,6 +2726,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 	if (ret)
 		goto out;
+	ufshcd_config_max_pwr_mode(hba);
+
 	ret = ufshcd_verify_dev_init(hba);
 	if (ret)
 		goto out;
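
For reference, ufshcd_config_max_pwr_mode() packs the requested RX mode into the high nibble and the TX mode into the low nibble of the PA_PWRMODE argument (pwr[RX] << 4 | pwr[TX]). A standalone sketch of that packing, using the mode values this series adds in unipro.h:

/* Sketch: composing the PA_PWRMODE argument as the code above does. */
enum { FASTAUTO_MODE = 4, SLOWAUTO_MODE = 5 };	/* values from unipro.h */

static unsigned char pack_pwr_mode(unsigned char rx, unsigned char tx)
{
	return (unsigned char)(rx << 4 | tx);	/* RX high nibble, TX low */
}

/* pack_pwr_mode(FASTAUTO_MODE, FASTAUTO_MODE) == 0x44,
 * pack_pwr_mode(SLOWAUTO_MODE, FASTAUTO_MODE) == 0x54 */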


@@ -175,6 +175,7 @@ struct ufs_dev_cmd {
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for uic command
 * @ufshcd_tm_wait_queue: wait queue for task management
+ * @pwr_done: completion for power mode change
 * @tm_condition: condition variable for task management
 * @ufshcd_state: UFSHCD states
 * @intr_mask: Interrupt Mask Bits
@@ -219,6 +220,8 @@ struct ufs_hba {
 	wait_queue_head_t ufshcd_tm_wait_queue;
 	unsigned long tm_condition;
+	struct completion *pwr_done;
+
 	u32 ufshcd_state;
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
@@ -263,4 +266,55 @@ static inline void check_upiu_size(void)
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+			       u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+			       u32 *mib_val, u8 peer);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL	0
+#define DME_PEER	1
+#define ATTR_SET_NOR	0	/* NORMAL */
+#define ATTR_SET_ST	1	/* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+				 u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+				   mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+				    u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+				   mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+				      u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+				   mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+					 u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+				   mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+				 u32 attr_sel, u32 *mib_val)
+{
+	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+				      u32 attr_sel, u32 *mib_val)
+{
+	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
+
 #endif /* End of Header */
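
A short usage sketch for the new wrappers (error handling trimmed; hba is a live per-adapter instance, attribute names come from unipro.h and UIC_ARG_MIB from ufshci.h below):

u32 rx_gear = 0;

/* read a local PHY attribute */
if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &rx_gear))
	dev_warn(hba->dev, "could not read PA_MAXRXHSGEAR\n");

/* program the peer (device) end of the link */
if (ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE))
	dev_warn(hba->dev, "could not set peer PA_TXTERMINATION\n");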


@@ -124,6 +124,9 @@ enum {
 #define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
+#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL |\
+				 UIC_POWER_MODE)
+
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
@@ -142,6 +145,15 @@ enum {
 #define DEVICE_ERROR_INDICATOR			UFS_BIT(5)
 #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK	UFS_MASK(0x7, 8)
+enum {
+	PWR_OK		= 0x0,
+	PWR_LOCAL	= 0x01,
+	PWR_REMOTE	= 0x02,
+	PWR_BUSY	= 0x03,
+	PWR_ERROR_CAP	= 0x04,
+	PWR_FATAL_ERROR	= 0x05,
+};
+
 /* HCE - Host Controller Enable 34h */
 #define CONTROLLER_ENABLE	UFS_BIT(0)
 #define CONTROLLER_DISABLE	0x0
@@ -191,6 +203,12 @@ enum {
 #define CONFIG_RESULT_CODE_MASK		0xFF
 #define GENERIC_ERROR_CODE_MASK		0xFF
+#define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) |\
+					 ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr)		UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t)		(((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)
+
 /* UIC Commands */
 enum {
 	UIC_CMD_DME_GET			= 0x01,
@@ -226,8 +244,8 @@ enum {
 #define MASK_UIC_COMMAND_RESULT			0xFF
-#define INT_AGGR_COUNTER_THRESHOLD_VALUE	(0x1F << 8)
-#define INT_AGGR_TIMEOUT_VALUE			(0x02)
+#define INT_AGGR_COUNTER_THLD_VAL(c)	(((c) & 0x1F) << 8)
+#define INT_AGGR_TIMEOUT_VAL(t)		(((t) & 0xFF) << 0)
 /* Interrupt disable masks */
 enum {
new file (151 lines): drivers/scsi/ufs/unipro.h

@@ -0,0 +1,151 @@
+/*
+ * drivers/scsi/ufs/unipro.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+/*
+ * PHY Adapter attributes
+ */
+#define PA_ACTIVETXDATALANES	0x1560
+#define PA_ACTIVERXDATALANES	0x1580
+#define PA_TXTRAILINGCLOCKS	0x1564
+#define PA_PHY_TYPE		0x1500
+#define PA_AVAILTXDATALANES	0x1520
+#define PA_AVAILRXDATALANES	0x1540
+#define PA_MINRXTRAILINGCLOCKS	0x1543
+#define PA_TXPWRSTATUS		0x1567
+#define PA_RXPWRSTATUS		0x1582
+#define PA_TXFORCECLOCK		0x1562
+#define PA_TXPWRMODE		0x1563
+#define PA_LEGACYDPHYESCDL	0x1570
+#define PA_MAXTXSPEEDFAST	0x1521
+#define PA_MAXTXSPEEDSLOW	0x1522
+#define PA_MAXRXSPEEDFAST	0x1541
+#define PA_MAXRXSPEEDSLOW	0x1542
+#define PA_TXLINKSTARTUPHS	0x1544
+#define PA_TXSPEEDFAST		0x1565
+#define PA_TXSPEEDSLOW		0x1566
+#define PA_REMOTEVERINFO	0x15A0
+#define PA_TXGEAR		0x1568
+#define PA_TXTERMINATION	0x1569
+#define PA_HSSERIES		0x156A
+#define PA_PWRMODE		0x1571
+#define PA_RXGEAR		0x1583
+#define PA_RXTERMINATION	0x1584
+#define PA_MAXRXPWMGEAR		0x1586
+#define PA_MAXRXHSGEAR		0x1587
+#define PA_RXHSUNTERMCAP	0x15A5
+#define PA_RXLSTERMCAP		0x15A6
+#define PA_PACPREQTIMEOUT	0x1590
+#define PA_PACPREQEOBTIMEOUT	0x1591
+#define PA_HIBERN8TIME		0x15A7
+#define PA_LOCALVERINFO		0x15A9
+#define PA_TACTIVATE		0x15A8
+#define PA_PACPFRAMECOUNT	0x15C0
+#define PA_PACPERRORCOUNT	0x15C1
+#define PA_PHYTESTCONTROL	0x15C2
+#define PA_PWRMODEUSERDATA0	0x15B0
+#define PA_PWRMODEUSERDATA1	0x15B1
+#define PA_PWRMODEUSERDATA2	0x15B2
+#define PA_PWRMODEUSERDATA3	0x15B3
+#define PA_PWRMODEUSERDATA4	0x15B4
+#define PA_PWRMODEUSERDATA5	0x15B5
+#define PA_PWRMODEUSERDATA6	0x15B6
+#define PA_PWRMODEUSERDATA7	0x15B7
+#define PA_PWRMODEUSERDATA8	0x15B8
+#define PA_PWRMODEUSERDATA9	0x15B9
+#define PA_PWRMODEUSERDATA10	0x15BA
+#define PA_PWRMODEUSERDATA11	0x15BB
+#define PA_CONNECTEDTXDATALANES	0x1561
+#define PA_CONNECTEDRXDATALANES	0x1581
+#define PA_LOGICALLANEMAP	0x15A1
+#define PA_SLEEPNOCONFIGTIME	0x15A2
+#define PA_STALLNOCONFIGTIME	0x15A3
+#define PA_SAVECONFIGTIME	0x15A4
+
+/* PA power modes */
+enum {
+	FAST_MODE	= 1,
+	SLOW_MODE	= 2,
+	FASTAUTO_MODE	= 4,
+	SLOWAUTO_MODE	= 5,
+	UNCHANGED	= 7,
+};
+
+/* PA TX/RX Frequency Series */
+enum {
+	PA_HS_MODE_A	= 1,
+	PA_HS_MODE_B	= 2,
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD	0x2040
+#define DL_FC0PROTTIMEOUTVAL	0x2041
+#define DL_TC0REPLAYTIMEOUTVAL	0x2042
+#define DL_AFC0REQTIMEOUTVAL	0x2043
+#define DL_AFC0CREDITTHRESHOLD	0x2044
+#define DL_TC0OUTACKTHRESHOLD	0x2045
+#define DL_TC1TXFCTHRESHOLD	0x2060
+#define DL_FC1PROTTIMEOUTVAL	0x2061
+#define DL_TC1REPLAYTIMEOUTVAL	0x2062
+#define DL_AFC1REQTIMEOUTVAL	0x2063
+#define DL_AFC1CREDITTHRESHOLD	0x2064
+#define DL_TC1OUTACKTHRESHOLD	0x2065
+#define DL_TXPREEMPTIONCAP	0x2000
+#define DL_TC0TXMAXSDUSIZE	0x2001
+#define DL_TC0RXINITCREDITVAL	0x2002
+#define DL_TC0TXBUFFERSIZE	0x2005
+#define DL_PEERTC0PRESENT	0x2046
+#define DL_PEERTC0RXINITCREVAL	0x2047
+#define DL_TC1TXMAXSDUSIZE	0x2003
+#define DL_TC1RXINITCREDITVAL	0x2004
+#define DL_TC1TXBUFFERSIZE	0x2006
+#define DL_PEERTC1PRESENT	0x2066
+#define DL_PEERTC1RXINITCREVAL	0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID		0x3000
+#define N_DEVICEID_VALID	0x3001
+#define N_TC0TXMAXSDUSIZE	0x3020
+#define N_TC1TXMAXSDUSIZE	0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS		0x4000
+#define T_NUMTESTFEATURES	0x4001
+#define T_CONNECTIONSTATE	0x4020
+#define T_PEERDEVICEID		0x4021
+#define T_PEERCPORTID		0x4022
+#define T_TRAFFICCLASS		0x4023
+#define T_PROTOCOLID		0x4024
+#define T_CPORTFLAGS		0x4025
+#define T_TXTOKENVALUE		0x4026
+#define T_RXTOKENVALUE		0x4027
+#define T_LOCALBUFFERSPACE	0x4028
+#define T_PEERBUFFERSPACE	0x4029
+#define T_CREDITSTOSEND		0x402A
+#define T_CPORTMODE		0x402B
+#define T_TC0TXMAXSDUSIZE	0x4060
+#define T_TC1TXMAXSDUSIZE	0x4061
+
+/* Boolean attribute values */
+enum {
+	FALSE = 0,
+	TRUE,
+};
+
+#endif /* _UNIPRO_H_ */


@@ -758,6 +758,7 @@
 #define PCI_DEVICE_ID_HP_CISSE		0x323a
 #define PCI_DEVICE_ID_HP_CISSF		0x323b
 #define PCI_DEVICE_ID_HP_CISSH		0x323c
+#define PCI_DEVICE_ID_HP_CISSI		0x3239
 #define PCI_DEVICE_ID_HP_ZX2_IOC	0x4031
 #define PCI_VENDOR_ID_PCTECH		0x1042