-rw-r--r-- | drivers/scsi/csiostor/csio_scsi.c  |   2
-rw-r--r-- | drivers/scsi/fnic/fnic_trace.c     |  16
-rw-r--r-- | drivers/scsi/pm8001/pm8001_defs.h  |   2
-rw-r--r-- | drivers/scsi/pm8001/pm8001_hwi.c   |   5
-rw-r--r-- | drivers/scsi/pm8001/pm8001_sas.c   |  78
-rw-r--r-- | drivers/scsi/pm8001/pm8001_sas.h   |   2
-rw-r--r-- | drivers/scsi/pm8001/pm80xx_hwi.c   |  59
-rw-r--r-- | drivers/scsi/qla2xxx/qla_def.h     |   2
-rw-r--r-- | drivers/scsi/qla2xxx/qla_dfs.c     | 124
-rw-r--r-- | drivers/scsi/qla2xxx/qla_gbl.h     |   3
-rw-r--r-- | drivers/scsi/qla2xxx/qla_init.c    |  28
-rw-r--r-- | drivers/ufs/core/ufshcd.c          | 253
-rw-r--r-- | include/scsi/scsi_bsg_iscsi.h      |   2
-rw-r--r-- | include/ufs/ufshcd.h               |  25
14 files changed, 379 insertions(+), 222 deletions(-)
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 7d2149e486af..34bde6650fae 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -800,7 +800,7 @@ csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt) rn = req->rnode; /* * FW says remote device is lost, but rnode - * doesnt reflect it. + * doesn't reflect it. */ if (csio_scsi_itnexus_loss_error(req->wr_status) && csio_is_rnode_ready(rn)) { diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index aaa4ea02fb7c..e5e0c0492f23 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -485,8 +485,7 @@ int fnic_trace_buf_init(void) } fnic_trace_entries.page_offset = - vmalloc(array_size(fnic_max_trace_entries, - sizeof(unsigned long))); + vcalloc(fnic_max_trace_entries, sizeof(unsigned long)); if (!fnic_trace_entries.page_offset) { printk(KERN_ERR PFX "Failed to allocate memory for" " page_offset\n"); @@ -497,8 +496,6 @@ int fnic_trace_buf_init(void) err = -ENOMEM; goto err_fnic_trace_buf_init; } - memset((void *)fnic_trace_entries.page_offset, 0, - (fnic_max_trace_entries * sizeof(unsigned long))); fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0; fnic_buf_head = fnic_trace_buf_p; @@ -559,8 +556,7 @@ int fnic_fc_trace_init(void) fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ FC_TRC_SIZE_BYTES; fnic_fc_ctlr_trace_buf_p = - (unsigned long)vmalloc(array_size(PAGE_SIZE, - fnic_fc_trace_max_pages)); + (unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE); if (!fnic_fc_ctlr_trace_buf_p) { pr_err("fnic: Failed to allocate memory for " "FC Control Trace Buf\n"); @@ -568,13 +564,9 @@ int fnic_fc_trace_init(void) goto err_fnic_fc_ctlr_trace_buf_init; } - memset((void *)fnic_fc_ctlr_trace_buf_p, 0, - fnic_fc_trace_max_pages * PAGE_SIZE); - /* Allocate memory for page offset */ fc_trace_entries.page_offset = - vmalloc(array_size(fc_trace_max_entries, - sizeof(unsigned long))); + vcalloc(fc_trace_max_entries, sizeof(unsigned long)); if (!fc_trace_entries.page_offset) { pr_err("fnic:Failed to allocate memory for page_offset\n"); if (fnic_fc_ctlr_trace_buf_p) { @@ -585,8 +577,6 @@ int fnic_fc_trace_init(void) err = -ENOMEM; goto err_fnic_fc_ctlr_trace_buf_init; } - memset((void *)fc_trace_entries.page_offset, 0, - (fc_trace_max_entries * sizeof(unsigned long))); fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index 7871e29a820a..4e19d61dffbb 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -90,7 +90,7 @@ enum port_type { #define PM8001_MAX_PORTS 16 /* max. 
possible ports */ #define PM8001_MAX_DEVICES 2048 /* max supported device */ #define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ -#define PM8001_RESERVE_SLOT 8 +#define PM8001_RESERVE_SLOT 128 #define PM8001_SECTOR_SIZE 512 #define PM8001_PAGE_SIZE_4K 4096 diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index dec1e2d380f1..42a4eeac24c9 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -3472,12 +3472,13 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) status, tag, scp); switch (status) { case IO_SUCCESS: - pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n"); + pm8001_dbg(pm8001_ha, FAIL, "ABORT IO_SUCCESS for tag %#x\n", + tag); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_SAM_STAT_GOOD; break; case IO_NOT_VALID: - pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n"); + pm8001_dbg(pm8001_ha, FAIL, "IO_NOT_VALID for tag %#x\n", tag); ts->resp = TMF_RESP_FUNC_FAILED; break; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index d80cffd25a6e..183ce00aa671 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -101,6 +101,63 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) return 0; } +static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op, + int *ata_tag, bool *task_aborted) +{ + unsigned long flags; + struct ata_queued_cmd *qc = NULL; + + *ata_op = 0; + *ata_tag = -1; + *task_aborted = false; + + if (!task) + return; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) + *task_aborted = true; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (task->task_proto == SAS_PROTOCOL_STP) { + // sas_ata_qc_issue path uses SAS_PROTOCOL_STP. + // This only works for scsi + libsas + libata users. + qc = task->uldd_task; + if (qc) { + *ata_op = qc->tf.command; + *ata_tag = qc->tag; + } + } +} + +void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *target_pm8001_dev) +{ + int i = 0, ata_op = 0, ata_tag = -1; + struct pm8001_ccb_info *ccb = NULL; + struct sas_task *task = NULL; + struct pm8001_device *pm8001_dev = NULL; + bool task_aborted; + + for (i = 0; i < pm8001_ha->ccb_count; i++) { + ccb = &pm8001_ha->ccb_info[i]; + if (ccb->ccb_tag == PM8001_INVALID_TAG) + continue; + pm8001_dev = ccb->device; + if (target_pm8001_dev && pm8001_dev && + target_pm8001_dev != pm8001_dev) + continue; + task = ccb->task; + pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted); + pm8001_dbg(pm8001_ha, FAIL, + "tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n", + ccb->ccb_tag, + (pm8001_dev ? pm8001_dev->device_id : 0), + task, task_aborted, + ata_op, ata_tag); + } +} + /** * pm8001_mem_alloc - allocate memory for pm8001. * @pdev: pci device. 
@@ -374,23 +431,6 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); } - /* Find the local port id that's attached to this device */ -static int sas_find_local_port_id(struct domain_device *dev) -{ - struct domain_device *pdev = dev->parent; - - /* Directly attached device */ - if (!pdev) - return dev->port->id; - while (pdev) { - struct domain_device *pdev_p = pdev->parent; - if (!pdev_p) - return pdev->port->id; - pdev = pdev->parent; - } - return 0; -} - #define DEV_IS_GONE(pm8001_dev) \ ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) @@ -463,10 +503,10 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags) spin_lock_irqsave(&pm8001_ha->lock, flags); pm8001_dev = dev->lldd_dev; - port = &pm8001_ha->port[sas_find_local_port_id(dev)]; + port = pm8001_ha->phy[pm8001_dev->attached_phy].port; if (!internal_abort && - (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) { + (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) { ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; if (sas_protocol_ata(task_proto)) { diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 42c7b3f7afbf..a349cf50770c 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -787,6 +787,8 @@ static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha, } void pm8001_setds_completion(struct domain_device *dev); void pm8001_tmf_aborted(struct sas_task *task); +void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *dev); #endif diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index e65951dd2024..5b373c53c036 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2246,7 +2246,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, u32 param; u32 status; u32 tag; - int i, j; + int i, j, ata_tag = -1; u8 sata_addr_low[4]; u32 temp_sata_addr_low, temp_sata_addr_hi; u8 sata_addr_hi[4]; @@ -2256,6 +2256,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, u32 *sata_resp; struct pm8001_device *pm8001_dev; unsigned long flags; + struct ata_queued_cmd *qc; psataPayload = (struct sata_completion_resp *)(piomb + 4); status = le32_to_cpu(psataPayload->status); @@ -2267,8 +2268,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, pm8001_dev = ccb->device; if (t) { - if (t->dev && (t->dev->lldd_dev)) + if (t->dev && (t->dev->lldd_dev)) { pm8001_dev = t->dev->lldd_dev; + qc = t->uldd_task; + ata_tag = qc ? 
qc->tag : -1; + } } else { pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", ccb->ccb_tag); @@ -2276,16 +2280,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, return; } - if (pm8001_dev && unlikely(!t->lldd_task || !t->dev)) return; ts = &t->task_status; - if (status != IO_SUCCESS) { pm8001_dbg(pm8001_ha, FAIL, - "IO failed device_id %u status 0x%x tag %d\n", - pm8001_dev->device_id, status, tag); + "IO failed status %#x pm80xx tag %#x ata tag %d\n", + status, tag, ata_tag); } /* Print sas address of IO failed device */ @@ -2667,13 +2669,19 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, /* Check if this is NCQ error */ if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* tag value is invalid with this event */ + pm8001_dbg(pm8001_ha, FAIL, "NCQ ERROR for device %#x tag %#x\n", + dev_id, tag); + /* find device using device id */ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); /* send read log extension by aborting the link - libata does what we want */ - if (pm8001_dev) + if (pm8001_dev) { + pm80xx_show_pending_commands(pm8001_ha, pm8001_dev); pm8001_handle_event(pm8001_ha, pm8001_dev, IO_XFER_ERROR_ABORTED_NCQ_MODE); + } return; } @@ -3336,10 +3344,11 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 phy_id = le32_to_cpu(pPayload->phyid) & 0xFF; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + u32 tag = le32_to_cpu(pPayload->tag); pm8001_dbg(pm8001_ha, INIT, - "phy start resp status:0x%x, phyid:0x%x\n", - status, phy_id); + "phy start resp status:0x%x, phyid:0x%x, tag 0x%x\n", + status, phy_id, tag); if (status == 0) phy->phy_state = PHY_LINK_DOWN; @@ -3348,6 +3357,8 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) complete(phy->enable_completion); phy->enable_completion = NULL; } + + pm8001_tag_free(pm8001_ha, tag); return 0; } @@ -3628,8 +3639,10 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 phyid = le32_to_cpu(pPayload->phyid) & 0xFF; struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; - pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n", - phyid, status); + u32 tag = le32_to_cpu(pPayload->tag); + + pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x tag 0x%x\n", phyid, + status, tag); if (status == PHY_STOP_SUCCESS || status == PHY_STOP_ERR_DEVICE_ATTACHED) { phy->phy_state = PHY_LINK_DISABLE; @@ -3637,6 +3650,7 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) phy->sas_phy.linkrate = SAS_PHY_DISABLED; } + pm8001_tag_free(pm8001_ha, tag); return 0; } @@ -3655,10 +3669,9 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, u32 tag = le32_to_cpu(pPayload->tag); pm8001_dbg(pm8001_ha, MSG, - "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", - status, err_qlfr_pgcd); + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x tag 0x%x\n", + status, err_qlfr_pgcd, tag); pm8001_tag_free(pm8001_ha, tag); - return 0; } @@ -4632,9 +4645,16 @@ static int pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_start_req payload; - u32 tag = 0x01; + int ret; + u32 tag; u32 opcode = OPC_INB_PHYSTART; + ret = pm8001_tag_alloc(pm8001_ha, &tag); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n"); + return ret; + } + memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); @@ -4670,9 +4690,16 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; - u32 tag = 0x01; + int ret; + u32 tag; u32 
opcode = OPC_INB_PHYSTOP; + ret = pm8001_tag_alloc(pm8001_ha, &tag); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n"); + return ret; + } + memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 15066c112817..cb95b7b12051 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4098,6 +4098,8 @@ struct qla_hw_data { uint32_t npiv_supported :1; uint32_t pci_channel_io_perm_failure :1; uint32_t fce_enabled :1; + uint32_t user_enabled_fce :1; + uint32_t fce_dump_buf_alloced :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index a1545dad0c0c..08273520c777 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -409,26 +409,31 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) mutex_lock(&ha->fce_mutex); - seq_puts(s, "FCE Trace Buffer\n"); - seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); - seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); - seq_puts(s, "FCE Enable Registers\n"); - seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", - ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], - ha->fce_mb[5], ha->fce_mb[6]); - - fce = (uint32_t *) ha->fce; - fce_start = (unsigned long long) ha->fce_dma; - for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { - if (cnt % 8 == 0) - seq_printf(s, "\n%llx: ", - (unsigned long long)((cnt * 4) + fce_start)); - else - seq_putc(s, ' '); - seq_printf(s, "%08x", *fce++); - } + if (ha->flags.user_enabled_fce) { + seq_puts(s, "FCE Trace Buffer\n"); + seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); + seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma); + seq_puts(s, "FCE Enable Registers\n"); + seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", + ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], + ha->fce_mb[5], ha->fce_mb[6]); + + fce = (uint32_t *)ha->fce; + fce_start = (unsigned long long)ha->fce_dma; + for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { + if (cnt % 8 == 0) + seq_printf(s, "\n%llx: ", + (unsigned long long)((cnt * 4) + fce_start)); + else + seq_putc(s, ' '); + seq_printf(s, "%08x", *fce++); + } - seq_puts(s, "\nEnd\n"); + seq_puts(s, "\nEnd\n"); + } else { + seq_puts(s, "FCE Trace is currently not enabled\n"); + seq_puts(s, "\techo [ 1 | 0 ] > fce\n"); + } mutex_unlock(&ha->fce_mutex); @@ -467,7 +472,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file) struct qla_hw_data *ha = vha->hw; int rval; - if (ha->flags.fce_enabled) + if (ha->flags.fce_enabled || !ha->fce) goto out; mutex_lock(&ha->fce_mutex); @@ -488,11 +493,88 @@ out: return single_release(inode, file); } +static ssize_t +qla2x00_dfs_fce_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + char *buf; + int rc = 0; + unsigned long enable; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { + ql_dbg(ql_dbg_user, vha, 0xd034, + "this adapter does not support FCE."); + return -EINVAL; + } + + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) { + ql_dbg(ql_dbg_user, vha, 0xd037, + "fail to copy user buffer."); + return PTR_ERR(buf); + } + + enable = 
kstrtoul(buf, 0, 0); + rc = count; + + mutex_lock(&ha->fce_mutex); + + if (enable) { + if (ha->flags.user_enabled_fce) { + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + ha->flags.user_enabled_fce = 1; + if (!ha->fce) { + rc = qla2x00_alloc_fce_trace(vha); + if (rc) { + ha->flags.user_enabled_fce = 0; + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + + /* adjust fw dump buffer to take into account of this feature */ + if (!ha->flags.fce_dump_buf_alloced) + qla2x00_alloc_fw_dump(vha); + } + + if (!ha->flags.fce_enabled) + qla_enable_fce_trace(vha); + + ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE .\n"); + } else { + if (!ha->flags.user_enabled_fce) { + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + ha->flags.user_enabled_fce = 0; + if (ha->flags.fce_enabled) { + qla2x00_disable_fce_trace(vha, NULL, NULL); + ha->flags.fce_enabled = 0; + } + + qla2x00_free_fce_trace(ha); + /* no need to re-adjust fw dump buffer */ + + ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE .\n"); + } + + mutex_unlock(&ha->fce_mutex); +out_free: + kfree(buf); + return rc; +} + static const struct file_operations dfs_fce_ops = { .open = qla2x00_dfs_fce_open, .read = seq_read, .llseek = seq_lseek, .release = qla2x00_dfs_fce_release, + .write = qla2x00_dfs_fce_write, }; static int @@ -626,8 +708,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto out; - if (!ha->fce) - goto out; if (qla2x00_dfs_root) goto create_dir; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index cededfda9d0e..e556f57c91af 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -11,6 +11,9 @@ /* * Global Function Prototypes in qla_init.c source file. */ +int qla2x00_alloc_fce_trace(scsi_qla_host_t *); +void qla2x00_free_fce_trace(struct qla_hw_data *ha); +void qla_enable_fce_trace(scsi_qla_host_t *); extern int qla2x00_initialize_adapter(scsi_qla_host_t *); extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 31fc6a0eca3e..79cdfec2bca3 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2681,7 +2681,7 @@ exit: return rval; } -static void qla_enable_fce_trace(scsi_qla_host_t *vha) +void qla_enable_fce_trace(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; @@ -3717,25 +3717,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) return rval; } -static void -qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) +int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) { dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; if (!IS_FWI2_CAPABLE(ha)) - return; + return -EINVAL; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) - return; + return -EINVAL; if (ha->fce) { ql_dbg(ql_dbg_init, vha, 0x00bd, "%s: FCE Mem is already allocated.\n", __func__); - return; + return -EIO; } /* Allocate memory for Fibre Channel Event Buffer. 
*/ @@ -3745,7 +3744,7 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) ql_log(ql_log_warn, vha, 0x00be, "Unable to allocate (%d KB) for FCE.\n", FCE_SIZE / 1024); - return; + return -ENOMEM; } ql_dbg(ql_dbg_init, vha, 0x00c0, @@ -3754,6 +3753,16 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) ha->fce_dma = tc_dma; ha->fce = tc; ha->fce_bufs = FCE_NUM_BUFFERS; + return 0; +} + +void qla2x00_free_fce_trace(struct qla_hw_data *ha) +{ + if (!ha->fce) + return; + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma); + ha->fce = NULL; + ha->fce_dma = 0; } static void @@ -3844,9 +3853,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); - qla2x00_alloc_fce_trace(vha); - if (ha->fce) + if (ha->fce) { fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; + ha->flags.fce_dump_buf_alloced = 1; + } qla2x00_alloc_eft_trace(vha); if (ha->eft) eft_size = EFT_SIZE; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 661f9811ea4b..528a9f35cf37 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -258,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, return UFS_PM_LVL_0; } +static bool ufshcd_has_pending_tasks(struct ufs_hba *hba) +{ + return hba->outstanding_tasks || hba->active_uic_cmd || + hba->uic_async_done; +} + static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba) { - return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks || - hba->active_uic_cmd || hba->uic_async_done); + return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba); } static const struct ufs_dev_quirk ufs_fixups[] = { @@ -1447,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) { struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_scaling.suspend_work); - unsigned long irq_flags; - spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (hba->clk_scaling.active_reqs || + hba->clk_scaling.is_suspended) + return; + + hba->clk_scaling.is_suspended = true; + hba->clk_scaling.window_start_t = 0; } - hba->clk_scaling.is_suspended = true; - hba->clk_scaling.window_start_t = 0; - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); devfreq_suspend_device(hba->devfreq); } @@ -1465,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work) { struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_scaling.resume_work); - unsigned long irq_flags; - spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (!hba->clk_scaling.is_suspended) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (!hba->clk_scaling.is_suspended) + return; + hba->clk_scaling.is_suspended = false; } - hba->clk_scaling.is_suspended = false; - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); devfreq_resume_device(hba->devfreq); } @@ -1487,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev, bool scale_up = false, sched_clk_scaling_suspend_work = false; struct list_head *clk_list = &hba->clk_list_head; struct ufs_clk_info *clki; - unsigned long irq_flags; if (!ufshcd_is_clkscaling_supported(hba)) return -EINVAL; @@ -1508,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev, *freq = 
(unsigned long) clk_round_rate(clki->clk, *freq); } - spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (ufshcd_eh_in_progress(hba)) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return 0; - } + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (ufshcd_eh_in_progress(hba)) + return 0; - /* Skip scaling clock when clock scaling is suspended */ - if (hba->clk_scaling.is_suspended) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - dev_warn(hba->dev, "clock scaling is suspended, skip"); - return 0; - } + /* Skip scaling clock when clock scaling is suspended */ + if (hba->clk_scaling.is_suspended) { + dev_warn(hba->dev, "clock scaling is suspended, skip"); + return 0; + } - if (!hba->clk_scaling.active_reqs) - sched_clk_scaling_suspend_work = true; + if (!hba->clk_scaling.active_reqs) + sched_clk_scaling_suspend_work = true; - if (list_empty(clk_list)) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - goto out; - } + if (list_empty(clk_list)) + goto out; - /* Decide based on the target or rounded-off frequency and update */ - if (hba->use_pm_opp) - scale_up = *freq > hba->clk_scaling.target_freq; - else - scale_up = *freq == clki->max_freq; + /* Decide based on the target or rounded-off frequency and update */ + if (hba->use_pm_opp) + scale_up = *freq > hba->clk_scaling.target_freq; + else + scale_up = *freq == clki->max_freq; - if (!hba->use_pm_opp && !scale_up) - *freq = clki->min_freq; + if (!hba->use_pm_opp && !scale_up) + *freq = clki->min_freq; - /* Update the frequency */ - if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - ret = 0; - goto out; /* no state change required */ + /* Update the frequency */ + if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) { + ret = 0; + goto out; /* no state change required */ + } } - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); start = ktime_get(); ret = ufshcd_devfreq_scale(hba, *freq, scale_up); @@ -1569,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, { struct ufs_hba *hba = dev_get_drvdata(dev); struct ufs_clk_scaling *scaling = &hba->clk_scaling; - unsigned long flags; ktime_t curr_t; if (!ufshcd_is_clkscaling_supported(hba)) @@ -1577,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, memset(stat, 0, sizeof(*stat)); - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_scaling.lock); + curr_t = ktime_get(); if (!scaling->window_start_t) goto start_window; @@ -1613,7 +1610,7 @@ start_window: scaling->busy_start_t = 0; scaling->is_busy_started = false; } - spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; } @@ -1677,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba) static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) { - unsigned long flags; bool suspend = false; cancel_work_sync(&hba->clk_scaling.suspend_work); cancel_work_sync(&hba->clk_scaling.resume_work); - spin_lock_irqsave(hba->host->host_lock, flags); - if (!hba->clk_scaling.is_suspended) { - suspend = true; - hba->clk_scaling.is_suspended = true; - hba->clk_scaling.window_start_t = 0; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (!hba->clk_scaling.is_suspended) { + suspend = true; + hba->clk_scaling.is_suspended = true; + hba->clk_scaling.window_start_t = 0; + } } - spin_unlock_irqrestore(hba->host->host_lock, flags); if (suspend) devfreq_suspend_device(hba->devfreq); @@ -1697,15 +1694,15 @@ 
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) static void ufshcd_resume_clkscaling(struct ufs_hba *hba) { - unsigned long flags; bool resume = false; - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->clk_scaling.is_suspended) { - resume = true; - hba->clk_scaling.is_suspended = false; + scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) + { + if (hba->clk_scaling.is_suspended) { + resume = true; + hba->clk_scaling.is_suspended = false; + } } - spin_unlock_irqrestore(hba->host->host_lock, flags); if (resume) devfreq_resume_device(hba->devfreq); @@ -1791,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba) INIT_WORK(&hba->clk_scaling.resume_work, ufshcd_clk_scaling_resume_work); + spin_lock_init(&hba->clk_scaling.lock); + hba->clk_scaling.workq = alloc_ordered_workqueue( "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no); @@ -1811,19 +1810,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) static void ufshcd_ungate_work(struct work_struct *work) { int ret; - unsigned long flags; struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_gating.ungate_work); cancel_delayed_work_sync(&hba->clk_gating.gate_work); - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->clk_gating.state == CLKS_ON) { - spin_unlock_irqrestore(hba->host->host_lock, flags); - return; + scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) { + if (hba->clk_gating.state == CLKS_ON) + return; } - spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_hba_vreg_set_hpm(hba); ufshcd_setup_clocks(hba, true); @@ -1858,7 +1854,7 @@ void ufshcd_hold(struct ufs_hba *hba) if (!ufshcd_is_clkgating_allowed(hba) || !hba->clk_gating.is_initialized) return; - spin_lock_irqsave(hba->host->host_lock, flags); + spin_lock_irqsave(&hba->clk_gating.lock, flags); hba->clk_gating.active_reqs++; start: @@ -1874,11 +1870,11 @@ start: */ if (ufshcd_can_hibern8_during_gating(hba) && ufshcd_is_link_hibern8(hba)) { - spin_unlock_irqrestore(hba->host->host_lock, flags); + spin_unlock_irqrestore(&hba->clk_gating.lock, flags); flush_result = flush_work(&hba->clk_gating.ungate_work); if (hba->clk_gating.is_suspended && !flush_result) return; - spin_lock_irqsave(hba->host->host_lock, flags); + spin_lock_irqsave(&hba->clk_gating.lock, flags); goto start; } break; @@ -1907,17 +1903,17 @@ start: */ fallthrough; case REQ_CLKS_ON: - spin_unlock_irqrestore(hba->host->host_lock, flags); + spin_unlock_irqrestore(&hba->clk_gating.lock, flags); flush_work(&hba->clk_gating.ungate_work); /* Make sure state is CLKS_ON before returning */ - spin_lock_irqsave(hba->host->host_lock, flags); + spin_lock_irqsave(&hba->clk_gating.lock, flags); goto start; default: dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", __func__, hba->clk_gating.state); break; } - spin_unlock_irqrestore(hba->host->host_lock, flags); + spin_unlock_irqrestore(&hba->clk_gating.lock, flags); } EXPORT_SYMBOL_GPL(ufshcd_hold); @@ -1925,28 +1921,32 @@ static void ufshcd_gate_work(struct work_struct *work) { struct ufs_hba *hba = container_of(work, struct ufs_hba, clk_gating.gate_work.work); - unsigned long flags; int ret; - spin_lock_irqsave(hba->host->host_lock, flags); - /* - * In case you are here to cancel this work the gating state - * would be marked as REQ_CLKS_ON. In this case save time by - * skipping the gating work and exit after changing the clock - * state to CLKS_ON. 
- */ - if (hba->clk_gating.is_suspended || - (hba->clk_gating.state != REQ_CLKS_OFF)) { - hba->clk_gating.state = CLKS_ON; - trace_ufshcd_clk_gating(dev_name(hba->dev), - hba->clk_gating.state); - goto rel_lock; - } + scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) { + /* + * In case you are here to cancel this work the gating state + * would be marked as REQ_CLKS_ON. In this case save time by + * skipping the gating work and exit after changing the clock + * state to CLKS_ON. + */ + if (hba->clk_gating.is_suspended || + hba->clk_gating.state != REQ_CLKS_OFF) { + hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); + return; + } - if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) - goto rel_lock; + if (hba->clk_gating.active_reqs) + return; + } - spin_unlock_irqrestore(hba->host->host_lock, flags); + scoped_guard(spinlock_irqsave, hba->host->host_lock) { + if (ufshcd_is_ufs_dev_busy(hba) || + hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + return; + } /* put the link into hibern8 mode before turning off clocks */ if (ufshcd_can_hibern8_during_gating(hba)) { @@ -1957,7 +1957,7 @@ static void ufshcd_gate_work(struct work_struct *work) __func__, ret); trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - goto out; + return; } ufshcd_set_link_hibern8(hba); } @@ -1977,33 +1977,34 @@ static void ufshcd_gate_work(struct work_struct *work) * prevent from doing cancel work multiple times when there are * new requests arriving before the current cancel work is done. */ - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); if (hba->clk_gating.state == REQ_CLKS_OFF) { hba->clk_gating.state = CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); } -rel_lock: - spin_unlock_irqrestore(hba->host->host_lock, flags); -out: - return; } -/* host lock must be held before calling this variant */ static void __ufshcd_release(struct ufs_hba *hba) { + lockdep_assert_held(&hba->clk_gating.lock); + if (!ufshcd_is_clkgating_allowed(hba)) return; hba->clk_gating.active_reqs--; if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || - hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || - hba->outstanding_tasks || !hba->clk_gating.is_initialized || - hba->active_uic_cmd || hba->uic_async_done || + !hba->clk_gating.is_initialized || hba->clk_gating.state == CLKS_OFF) return; + scoped_guard(spinlock_irqsave, hba->host->host_lock) { + if (ufshcd_has_pending_tasks(hba) || + hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + return; + } + hba->clk_gating.state = REQ_CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); queue_delayed_work(hba->clk_gating.clk_gating_workq, @@ -2013,11 +2014,8 @@ static void __ufshcd_release(struct ufs_hba *hba) void ufshcd_release(struct ufs_hba *hba) { - unsigned long flags; - - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); __ufshcd_release(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); } EXPORT_SYMBOL_GPL(ufshcd_release); @@ -2032,11 +2030,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev, void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value) { struct ufs_hba *hba = dev_get_drvdata(dev); - unsigned long flags; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); hba->clk_gating.delay_ms = value; - spin_unlock_irqrestore(hba->host->host_lock, 
flags); } EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set); @@ -2064,7 +2060,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ufs_hba *hba = dev_get_drvdata(dev); - unsigned long flags; u32 value; if (kstrtou32(buf, 0, &value)) @@ -2072,9 +2067,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev, value = !!value; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_gating.lock); + if (value == hba->clk_gating.is_enabled) - goto out; + return count; if (value) __ufshcd_release(hba); @@ -2082,8 +2078,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev, hba->clk_gating.active_reqs++; hba->clk_gating.is_enabled = value; -out: - spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; } @@ -2125,6 +2120,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba) INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); + spin_lock_init(&hba->clk_gating.lock); + hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue( "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI, hba->host->host_no); @@ -2154,19 +2151,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { bool queue_resume_work = false; ktime_t curr_t = ktime_get(); - unsigned long flags; if (!ufshcd_is_clkscaling_supported(hba)) return; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_scaling.lock); + if (!hba->clk_scaling.active_reqs++) queue_resume_work = true; - if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) return; - } if (queue_resume_work) queue_work(hba->clk_scaling.workq, @@ -2182,18 +2177,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) hba->clk_scaling.busy_start_t = curr_t; hba->clk_scaling.is_busy_started = true; } - spin_unlock_irqrestore(hba->host->host_lock, flags); } static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) { struct ufs_clk_scaling *scaling = &hba->clk_scaling; - unsigned long flags; if (!ufshcd_is_clkscaling_supported(hba)) return; - spin_lock_irqsave(hba->host->host_lock, flags); + guard(spinlock_irqsave)(&hba->clk_scaling.lock); + hba->clk_scaling.active_reqs--; if (!scaling->active_reqs && scaling->is_busy_started) { scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), @@ -2201,7 +2195,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) scaling->busy_start_t = 0; scaling->is_busy_started = false; } - spin_unlock_irqrestore(hba->host->host_lock, flags); } static inline int ufshcd_monitor_opcode2dir(u8 opcode) @@ -8221,7 +8214,9 @@ static void ufshcd_rtc_work(struct work_struct *work) hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work); /* Update RTC only when there are no requests in progress and UFSHCI is operational */ - if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) + if (!ufshcd_is_ufs_dev_busy(hba) && + hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL && + !hba->clk_gating.active_reqs) ufshcd_update_rtc(hba); if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period) @@ -9118,7 +9113,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) int ret = 0; struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head; - unsigned long flags; ktime_t start = 
ktime_get(); bool clk_state_changed = false; @@ -9169,11 +9163,10 @@ out: clk_disable_unprepare(clki->clk); } } else if (!ret && on) { - spin_lock_irqsave(hba->host->host_lock, flags); - hba->clk_gating.state = CLKS_ON; + scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) + hba->clk_gating.state = CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - spin_unlock_irqrestore(hba->host->host_lock, flags); } if (clk_state_changed) diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h index 9b1f0f424a79..a569c35b258d 100644 --- a/include/scsi/scsi_bsg_iscsi.h +++ b/include/scsi/scsi_bsg_iscsi.h @@ -59,7 +59,7 @@ struct iscsi_bsg_host_vendor { */ struct iscsi_bsg_host_vendor_reply { /* start of vendor response area */ - uint32_t vendor_rsp[0]; + DECLARE_FLEX_ARRAY(uint32_t, vendor_rsp); }; diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index d7aca9e61684..ce7667b020e2 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -403,6 +403,9 @@ enum clk_gating_state { * delay_ms * @ungate_work: worker to turn on clocks that will be used in case of * interrupt context + * @clk_gating_workq: workqueue for clock gating work. + * @lock: serialize access to some struct ufs_clk_gating members. An outer lock + * relative to the host lock * @state: the current clocks state * @delay_ms: gating delay in ms * @is_suspended: clk gating is suspended when set to 1 which can be used @@ -413,11 +416,14 @@ enum clk_gating_state { * @is_initialized: Indicates whether clock gating is initialized or not * @active_reqs: number of requests that are pending and should be waited for * completion before gating clocks. - * @clk_gating_workq: workqueue for clock gating work. */ struct ufs_clk_gating { struct delayed_work gate_work; struct work_struct ungate_work; + struct workqueue_struct *clk_gating_workq; + + spinlock_t lock; + enum clk_gating_state state; unsigned long delay_ms; bool is_suspended; @@ -426,11 +432,14 @@ struct ufs_clk_gating { bool is_enabled; bool is_initialized; int active_reqs; - struct workqueue_struct *clk_gating_workq; }; /** * struct ufs_clk_scaling - UFS clock scaling related data + * @workq: workqueue to schedule devfreq suspend/resume work + * @suspend_work: worker to suspend devfreq + * @resume_work: worker to resume devfreq + * @lock: serialize access to some struct ufs_clk_scaling members * @active_reqs: number of requests that are pending. If this is zero when * devfreq ->target() function is called then schedule "suspend_work" to * suspend devfreq. @@ -440,9 +449,6 @@ struct ufs_clk_gating { * @enable_attr: sysfs attribute to enable/disable clock scaling * @saved_pwr_info: UFS power mode may also be changed during scaling and this * one keeps track of previous power mode. 
- * @workq: workqueue to schedule devfreq suspend/resume work - * @suspend_work: worker to suspend devfreq - * @resume_work: worker to resume devfreq * @target_freq: frequency requested by devfreq framework * @min_gear: lowest HS gear to scale down to * @is_enabled: tracks if scaling is currently enabled or not, controlled by @@ -454,15 +460,18 @@ struct ufs_clk_gating { * @is_suspended: tracks if devfreq is suspended or not */ struct ufs_clk_scaling { + struct workqueue_struct *workq; + struct work_struct suspend_work; + struct work_struct resume_work; + + spinlock_t lock; + int active_reqs; unsigned long tot_busy_t; ktime_t window_start_t; ktime_t busy_start_t; struct device_attribute enable_attr; struct ufs_pa_layer_attr saved_pwr_info; - struct workqueue_struct *workq; - struct work_struct suspend_work; - struct work_struct resume_work; unsigned long target_freq; u32 min_gear; bool is_enabled; |
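The fnic_trace.c hunks fold each vmalloc(array_size(...)) + memset() pair into a single vcalloc() call, which zeroes the allocation while keeping the overflow-checked size computation. A minimal sketch of the before/after pattern, with illustrative names rather than the driver's own:

    #include <linux/vmalloc.h>
    #include <linux/overflow.h>
    #include <linux/string.h>

    /* before: overflow-checked size, then a separate memset() */
    static unsigned long *alloc_offsets_old(size_t nr_entries)
    {
            unsigned long *buf;

            buf = vmalloc(array_size(nr_entries, sizeof(*buf)));
            if (buf)
                    memset(buf, 0, nr_entries * sizeof(*buf));
            return buf;
    }

    /* after: vcalloc() allocates and zeroes in one overflow-checked call */
    static unsigned long *alloc_offsets_new(size_t nr_entries)
    {
            return vcalloc(nr_entries, sizeof(unsigned long));
    }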
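The new qla2x00_dfs_fce_write() handler follows the usual debugfs write shape: duplicate the user buffer with memdup_user_nul() so it is NUL-terminated, parse it as an integer, act on it under the driver's mutex, and free the copy. A hedged sketch of that shape only, independent of the qla2xxx flags and locking; the function and variable names below are placeholders:

    #include <linux/fs.h>
    #include <linux/err.h>
    #include <linux/kstrtox.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static ssize_t demo_dfs_write(struct file *file, const char __user *ubuf,
                                  size_t count, loff_t *pos)
    {
            unsigned long val;
            char *buf;
            int rc;

            buf = memdup_user_nul(ubuf, count);     /* bounded copy + trailing '\0' */
            if (IS_ERR(buf))
                    return PTR_ERR(buf);

            rc = kstrtoul(buf, 0, &val);            /* 0 on success, -errno otherwise */
            kfree(buf);
            if (rc)
                    return rc;

            /* ... apply val (enable/disable) under the relevant mutex ... */
            return count;
    }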
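The ufshcd.c changes move clock-gating and clock-scaling state off the SCSI host lock onto dedicated spinlocks in struct ufs_clk_gating and struct ufs_clk_scaling, and convert most critical sections to the scope-based guards from <linux/cleanup.h>, so the unlock runs automatically on every exit path. A small sketch of the two guard forms used in the diff; the lock and fields below are stand-ins, not the real ufs_hba layout:

    #include <linux/spinlock.h>
    #include <linux/cleanup.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(demo_lock);      /* stands in for hba->clk_gating.lock */
    static int demo_active_reqs;

    static void demo_release(void)
    {
            /* lock held until the end of the function, as in ufshcd_release() */
            guard(spinlock_irqsave)(&demo_lock);
            demo_active_reqs--;
    }

    static int demo_gate(void)
    {
            /* lock held only for the braced block, as in ufshcd_gate_work() */
            scoped_guard(spinlock_irqsave, &demo_lock) {
                    if (demo_active_reqs)
                            return -EBUSY;  /* unlocked automatically on return */
            }
            /* slow work proceeds outside the lock */
            return 0;
    }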
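The include/scsi/scsi_bsg_iscsi.h change replaces the deprecated zero-length array with DECLARE_FLEX_ARRAY(). The wrapper is needed because a flexible array member cannot be the sole member of a struct (or sit bare inside a union); the macro preserves the layout while letting the compiler's array-bounds checking treat it as a true flexible array. Roughly, with an illustrative struct name:

    #include <linux/types.h>
    #include <linux/stddef.h>

    struct demo_vendor_reply {
            /* was: uint32_t vendor_rsp[0]; */
            DECLARE_FLEX_ARRAY(uint32_t, vendor_rsp);
    };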