Diffstat (limited to 'drivers/scsi/smartpqi/smartpqi_init.c')
-rw-r--r--	drivers/scsi/smartpqi/smartpqi_init.c	352
1 file changed, 232 insertions, 120 deletions
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b5396d722d52..7fd5a8c813dc 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"2.1.28-025"
+#define DRIVER_VERSION		"2.1.30-031"
 #define DRIVER_MAJOR		2
 #define DRIVER_MINOR		1
-#define DRIVER_RELEASE		28
-#define DRIVER_REVISION		25
+#define DRIVER_RELEASE		30
+#define DRIVER_REVISION		31
 
 #define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
 				DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -92,9 +92,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
@@ -1508,8 +1508,8 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 	if (rc)
 		goto error;
 
-	device->raid_bypass_cnt = alloc_percpu(u64);
-	if (!device->raid_bypass_cnt) {
+	device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
+	if (!device->raid_io_stats) {
 		rc = -ENOMEM;
 		goto error;
 	}
@@ -2105,9 +2105,9 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 				/* To prevent this from being freed later. */
 				new_device->raid_map = NULL;
 			}
-			if (new_device->raid_bypass_enabled && existing_device->raid_bypass_cnt == NULL) {
-				existing_device->raid_bypass_cnt = new_device->raid_bypass_cnt;
-				new_device->raid_bypass_cnt = NULL;
+			if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
+				existing_device->raid_io_stats = new_device->raid_io_stats;
+				new_device->raid_io_stats = NULL;
 			}
 			existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
 			existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
@@ -2131,7 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 static inline void pqi_free_device(struct pqi_scsi_dev *device)
 {
 	if (device) {
-		free_percpu(device->raid_bypass_cnt);
+		free_percpu(device->raid_io_stats);
 		kfree(device->raid_map);
 		kfree(device);
 	}
@@ -2303,17 +2303,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 	 * queue depth, device size.
 	 */
 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+		/*
+		 * Check for queue depth change.
+		 */
 		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
 			device->advertised_queue_depth = device->queue_depth;
 			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
-			spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-			if (pqi_volume_rescan_needed(device)) {
-				device->rescan = false;
-				spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
-				scsi_rescan_device(device->sdev);
-			} else {
-				spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
-			}
+		}
+		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+		/*
+		 * Check for changes in the device, such as size.
+		 */
+		if (pqi_volume_rescan_needed(device)) {
+			device->rescan = false;
+			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+			scsi_rescan_device(device->sdev);
+		} else {
+			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 		}
 	}
 
@@ -3634,7 +3640,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 		ctrl_info->pqi_mode_enabled = false;
 		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		dev_info(&ctrl_info->pci_dev->dev,
 				"Online Firmware Activation: %s\n",
@@ -3645,7 +3651,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 				"Online Firmware Activation ABORTED\n");
 		if (ctrl_info->soft_reset_handshake_supported)
 			pqi_clear_soft_reset_status(ctrl_info);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		pqi_ofa_ctrl_unquiesce(ctrl_info);
 		break;
@@ -3655,7 +3661,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 		dev_err(&ctrl_info->pci_dev->dev,
 			"unexpected Online Firmware Activation reset status: 0x%x\n",
 			reset_status);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		pqi_ofa_ctrl_unquiesce(ctrl_info);
 		pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
@@ -3670,8 +3676,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
 	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
 
 	pqi_ctrl_ofa_start(ctrl_info);
-	pqi_ofa_setup_host_buffer(ctrl_info);
-	pqi_ofa_host_memory_update(ctrl_info);
+	pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
+	pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
 }
 
 static void pqi_ofa_quiesce_worker(struct work_struct *work)
@@ -3711,7 +3717,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
 		dev_info(&ctrl_info->pci_dev->dev,
 			"received Online Firmware Activation cancel request: reason: %u\n",
 			ctrl_info->ofa_cancel_reason);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		break;
 	default:
@@ -5942,7 +5948,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 	int rc;
 	struct pqi_scsi_dev *device;
 	struct pqi_stream_data *pqi_stream_data;
-	struct pqi_scsi_dev_raid_map_data rmd;
+	struct pqi_scsi_dev_raid_map_data rmd = { 0 };
 
 	if (!ctrl_info->enable_stream_detection)
 		return false;
@@ -5984,6 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 			pqi_stream_data->next_lba = rmd.first_block +
 				rmd.block_cnt;
 			pqi_stream_data->last_accessed = jiffies;
+			per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
 			return true;
 		}
 
@@ -6016,7 +6023,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 	u16 hw_queue;
 	struct pqi_queue_group *queue_group;
 	bool raid_bypassed;
-	u64 *raid_bypass_cnt;
 	u8 lun;
 
 	scmd->host_scribble = PQI_NO_COMPLETION;
@@ -6063,8 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 				raid_bypassed = true;
-				raid_bypass_cnt = per_cpu_ptr(device->raid_bypass_cnt, smp_processor_id());
-				(*raid_bypass_cnt)++;
+				per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
 			}
 		}
 		if (!raid_bypassed)
@@ -6201,14 +6206,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
 					continue;
 
 				scsi_device = scmd->device->hostdata;
-				if (scsi_device != device)
-					continue;
-
-				if ((u8)scmd->device->lun != lun)
-					continue;
 
 				list_del(&io_request->request_list_entry);
-				set_host_byte(scmd, DID_RESET);
+				if (scsi_device == device && (u8)scmd->device->lun == lun)
+					set_host_byte(scmd, DID_RESET);
+				else
+					set_host_byte(scmd, DID_REQUEUE);
 				pqi_free_io_request(io_request);
 				scsi_dma_unmap(scmd);
 				pqi_scsi_done(scmd);
@@ -7363,7 +7366,6 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 	unsigned long flags;
 	u64 raid_bypass_cnt;
 	int cpu;
-	u64 *per_cpu_bypass_cnt_ptr;
 
 	sdev = to_scsi_device(dev);
 	ctrl_info = shost_to_hba(sdev->host);
@@ -7381,10 +7383,9 @@
 
 	raid_bypass_cnt = 0;
 
-	if (device->raid_bypass_cnt) {
+	if (device->raid_io_stats) {
 		for_each_online_cpu(cpu) {
-			per_cpu_bypass_cnt_ptr = per_cpu_ptr(device->raid_bypass_cnt, cpu);
-			raid_bypass_cnt += *per_cpu_bypass_cnt_ptr;
+			raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
 		}
 	}
 
@@ -7472,6 +7473,43 @@ static ssize_t pqi_numa_node_show(struct device *dev,
 	return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
 }
 
+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u64 write_stream_cnt;
+	int cpu;
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	if (pqi_ctrl_offline(ctrl_info))
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
+	write_stream_cnt = 0;
+
+	if (device->raid_io_stats) {
+		for_each_online_cpu(cpu) {
+			write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+		}
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
+}
+
 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7482,6 +7520,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
 		pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
 
 static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_lunid.attr,
@@ -7493,6 +7532,7 @@ static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_raid_bypass_cnt.attr,
 	&dev_attr_sas_ncq_prio_enable.attr,
 	&dev_attr_numa_node.attr,
+	&dev_attr_write_stream_cnt.attr,
 	NULL
 };
 
@@ -7883,6 +7923,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
 	case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
 		ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
 		break;
+	case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
+		ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
+		break;
 	}
 
 	pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7988,6 +8031,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
 		.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
 		.feature_status = pqi_ctrl_update_feature_flags,
 	},
+	{
+		.feature_name = "Controller Data Logging",
+		.feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
+		.feature_status = pqi_ctrl_update_feature_flags,
+	},
 };
 
 static void pqi_process_firmware_features(
@@ -8090,6 +8138,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
 	ctrl_info->firmware_triage_supported = false;
 	ctrl_info->rpl_extended_format_4_5_supported = false;
 	ctrl_info->multi_lun_device_supported = false;
+	ctrl_info->ctrl_logging_supported = false;
 }
 
 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8230,6 +8279,9 @@ static void pqi_perform_lockup_action(void)
 	}
 }
 
+#define PQI_CTRL_LOG_TOTAL_SIZE	(4 * 1024 * 1024)
+#define PQI_CTRL_LOG_MIN_SIZE	(PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
+
 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
@@ -8241,6 +8293,12 @@
 			if (rc)
 				return rc;
 		}
+		if (sis_is_ctrl_logging_supported(ctrl_info)) {
+			sis_notify_kdump(ctrl_info);
+			rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+			if (rc)
+				return rc;
+		}
 		sis_soft_reset(ctrl_info);
 		ssleep(PQI_POST_RESET_DELAY_SECS);
 	} else {
@@ -8422,6 +8480,11 @@
 	if (rc)
 		return rc;
 
+	if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+	}
+
 	rc = pqi_get_ctrl_product_details(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
@@ -8606,8 +8669,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
 		return rc;
 	}
 
-	if (pqi_ofa_in_progress(ctrl_info))
+	if (pqi_ofa_in_progress(ctrl_info)) {
 		pqi_ctrl_unblock_scan(ctrl_info);
+		if (ctrl_info->ctrl_logging_supported) {
+			if (!ctrl_info->ctrl_log_memory.host_memory)
+				pqi_host_setup_buffer(ctrl_info,
+					&ctrl_info->ctrl_log_memory,
+					PQI_CTRL_LOG_TOTAL_SIZE,
+					PQI_CTRL_LOG_MIN_SIZE);
+			pqi_host_memory_update(ctrl_info,
+				&ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+		} else {
+			if (ctrl_info->ctrl_log_memory.host_memory)
+				pqi_host_free_buffer(ctrl_info,
+					&ctrl_info->ctrl_log_memory);
+		}
+	}
 
 	pqi_scan_scsi_devices(ctrl_info);
 
@@ -8797,6 +8874,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
 		pqi_fail_all_outstanding_requests(ctrl_info);
 		ctrl_info->pqi_mode_enabled = false;
 	}
+	pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
 	pqi_unregister_scsi(ctrl_info);
 	if (ctrl_info->pqi_mode_enabled)
 		pqi_revert_to_sis_mode(ctrl_info);
@@ -8822,177 +8900,187 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
 	pqi_ctrl_unblock_scan(ctrl_info);
 }
 
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
+{
+	ssleep(delay_secs);
+
+	return pqi_ctrl_init_resume(ctrl_info);
+}
+
+static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_size, u32 chunk_size)
 {
 	int i;
 	u32 sg_count;
 	struct device *dev;
-	struct pqi_ofa_memory *ofap;
+	struct pqi_host_memory *host_memory;
 	struct pqi_sg_descriptor *mem_descriptor;
 	dma_addr_t dma_handle;
 
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
 	sg_count = DIV_ROUND_UP(total_size, chunk_size);
-	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
+	if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
 		goto out;
 
-	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
-	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+	host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+	if (!host_memory_descriptor->host_chunk_virt_address)
		goto out;
 
 	dev = &ctrl_info->pci_dev->dev;
+	host_memory = host_memory_descriptor->host_memory;
 
 	for (i = 0; i < sg_count; i++) {
-		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
-			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
-		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
+		host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
+		if (!host_memory_descriptor->host_chunk_virt_address[i])
 			goto out_free_chunks;
-		mem_descriptor = &ofap->sg_descriptor[i];
+		mem_descriptor = &host_memory->sg_descriptor[i];
 		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
 		put_unaligned_le32(chunk_size, &mem_descriptor->length);
 	}
 
 	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
-	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
-	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
+	put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
+	put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
 
 	return 0;
 
 out_free_chunks:
 	while (--i >= 0) {
-		mem_descriptor = &ofap->sg_descriptor[i];
+		mem_descriptor = &host_memory->sg_descriptor[i];
 		dma_free_coherent(dev, chunk_size,
-			ctrl_info->pqi_ofa_chunk_virt_addr[i],
+			host_memory_descriptor->host_chunk_virt_address[i],
 			get_unaligned_le64(&mem_descriptor->address));
 	}
-	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
+	kfree(host_memory_descriptor->host_chunk_virt_address);
 out:
 	return -ENOMEM;
 }
 
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_required_size, u32 min_required_size)
 {
-	u32 total_size;
 	u32 chunk_size;
 	u32 min_chunk_size;
 
-	if (ctrl_info->ofa_bytes_requested == 0)
+	if (total_required_size == 0 || min_required_size == 0)
 		return 0;
 
-	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
-	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+	total_required_size = PAGE_ALIGN(total_required_size);
+	min_required_size = PAGE_ALIGN(min_required_size);
+	min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
 	min_chunk_size = PAGE_ALIGN(min_chunk_size);
 
-	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
-		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
-			return 0;
-		chunk_size /= 2;
-		chunk_size = PAGE_ALIGN(chunk_size);
+	while (total_required_size >= min_required_size) {
+		for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+			if (pqi_host_alloc_mem(ctrl_info,
+				host_memory_descriptor, total_required_size,
+				chunk_size) == 0)
+				return 0;
+			chunk_size /= 2;
+			chunk_size = PAGE_ALIGN(chunk_size);
		}
+		total_required_size /= 2;
+		total_required_size = PAGE_ALIGN(total_required_size);
 	}
 
 	return -ENOMEM;
 }
 
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_size, u32 min_size)
 {
 	struct device *dev;
-	struct pqi_ofa_memory *ofap;
+	struct pqi_host_memory *host_memory;
 
 	dev = &ctrl_info->pci_dev->dev;
 
-	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
-		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
-	if (!ofap)
+	host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+		&host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+	if (!host_memory)
 		return;
 
-	ctrl_info->pqi_ofa_mem_virt_addr = ofap;
+	host_memory_descriptor->host_memory = host_memory;
 
-	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
-		dev_err(dev,
-			"failed to allocate host buffer for Online Firmware Activation\n");
-		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
-		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+	if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+		total_size, min_size) < 0) {
+		dev_err(dev, "failed to allocate firmware usable host buffer\n");
+		dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+			host_memory_descriptor->host_memory_dma_handle);
+		host_memory_descriptor->host_memory = NULL;
 		return;
 	}
-
-	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
-	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
 }
 
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor)
 {
 	unsigned int i;
 	struct device *dev;
-	struct pqi_ofa_memory *ofap;
+	struct pqi_host_memory *host_memory;
 	struct pqi_sg_descriptor *mem_descriptor;
 	unsigned int num_memory_descriptors;
 
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-	if (!ofap)
+	host_memory = host_memory_descriptor->host_memory;
+	if (!host_memory)
 		return;
 
 	dev = &ctrl_info->pci_dev->dev;
 
-	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
+	if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
 		goto out;
 
-	mem_descriptor = ofap->sg_descriptor;
-	num_memory_descriptors =
-		get_unaligned_le16(&ofap->num_memory_descriptors);
+	mem_descriptor = host_memory->sg_descriptor;
+	num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
 
 	for (i = 0; i < num_memory_descriptors; i++) {
 		dma_free_coherent(dev,
 			get_unaligned_le32(&mem_descriptor[i].length),
-			ctrl_info->pqi_ofa_chunk_virt_addr[i],
+			host_memory_descriptor->host_chunk_virt_address[i],
 			get_unaligned_le64(&mem_descriptor[i].address));
 	}
-	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+	kfree(host_memory_descriptor->host_chunk_virt_address);
 
 out:
-	dma_free_coherent(dev, sizeof(*ofap), ofap,
-		ctrl_info->pqi_ofa_mem_dma_handle);
-	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+	dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+		host_memory_descriptor->host_memory_dma_handle);
+	host_memory_descriptor->host_memory = NULL;
 }
 
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u16 function_code)
 {
 	u32 buffer_length;
 	struct pqi_vendor_general_request request;
-	struct pqi_ofa_memory *ofap;
+	struct pqi_host_memory *host_memory;
 
 	memset(&request, 0, sizeof(request));
 
 	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
-	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
-		&request.header.iu_length);
-	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
-		&request.function_code);
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
-	if (ofap) {
-		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
-			get_unaligned_le16(&ofap->num_memory_descriptors) *
-			sizeof(struct pqi_sg_descriptor);
-
-		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
-			&request.data.ofa_memory_allocation.buffer_address);
-		put_unaligned_le32(buffer_length,
-			&request.data.ofa_memory_allocation.buffer_length);
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+	put_unaligned_le16(function_code, &request.function_code);
+
+	host_memory = host_memory_descriptor->host_memory;
+
+	if (host_memory) {
+		buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+		put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+		put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+		if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+		} else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+		}
 	}
 
 	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 }
 
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
-{
-	ssleep(delay_secs);
-
-	return pqi_ctrl_init_resume(ctrl_info);
-}
-
 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
 	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
 	.status = SAM_STAT_CHECK_CONDITION,
@@ -9466,6 +9554,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x0462)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0x1104)
 	},
 	{
@@ -9506,6 +9598,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x8462)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0xc460)
 	},
 	{
@@ -10214,6 +10310,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02fe)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02ff)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x0300)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1ff9, 0x0045)
 	},
 	{
@@ -10390,6 +10498,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x00a3)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_ANY_ID, PCI_ANY_ID)
 	},
 	{ 0 }
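
Note on the counter change: this is the classic per-CPU statistics pattern. Each CPU increments its own slot locklessly on the I/O hot path, and the sysfs read side sums the slots. A minimal user-space model of that pattern, with a plain array standing in for alloc_percpu()/per_cpu_ptr() and NR_CPUS chosen arbitrarily (the struct mirrors the driver's pqi_raid_io_stats; everything else here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* stand-in for the kernel's online-CPU set */

/* mirrors the driver's per-CPU stats block (one instance per CPU) */
struct pqi_raid_io_stats {
	uint64_t raid_bypass_cnt;
	uint64_t write_stream_cnt;
};

static struct pqi_raid_io_stats stats[NR_CPUS];	/* stand-in for alloc_percpu() */

/* hot path: bump only the issuing CPU's slot, no locking needed */
static void account_bypass(int cpu)
{
	stats[cpu].raid_bypass_cnt++;
}

/* read path: sum over all CPUs, as pqi_raid_bypass_cnt_show() does */
static uint64_t total_bypass(void)
{
	uint64_t sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += stats[cpu].raid_bypass_cnt;

	return sum;
}

int main(void)
{
	account_bypass(0);
	account_bypass(2);
	printf("raid_bypass_cnt = %llu\n", (unsigned long long)total_bypass());
	return 0;
}

Bundling both counters in one percpu struct keeps a single allocation per device while letting the new write_stream_cnt attribute reuse the same aggregation scheme.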
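
pqi_host_alloc_buffer() generalizes the old OFA-only allocator: it still halves the chunk size until the scatter list fits, but now also halves the total request down to min_required_size, so a smaller buffer (e.g. the controller log, which has a distinct minimum) can still succeed under memory pressure. A runnable sketch of that retry strategy, with malloc() standing in for dma_alloc_coherent() and page alignment dropped for brevity (MAX_SG is a placeholder for PQI_HOST_MAX_SG_DESCRIPTORS, whose real value lives in the driver header):

#include <stdlib.h>
#include <stdio.h>

#define MAX_SG 16	/* placeholder for PQI_HOST_MAX_SG_DESCRIPTORS */

/* stand-in for pqi_host_alloc_mem(): carve total into sg_count chunks */
static int try_alloc(size_t total, size_t chunk)
{
	size_t sg_count = (total + chunk - 1) / chunk;

	if (sg_count == 0 || sg_count > MAX_SG)
		return -1;

	/* the driver dma_alloc_coherent()s each chunk; malloc() models that */
	for (size_t i = 0; i < sg_count; i++) {
		void *p = malloc(chunk);

		if (!p)
			return -1;	/* real code frees the earlier chunks here */
		free(p);
	}
	return 0;
}

/*
 * Mirrors the new outer/inner loop: first shrink the chunk size down to
 * min_chunk, then halve the total request itself, stopping at min_size.
 */
static int alloc_buffer(size_t total, size_t min_size)
{
	size_t min_chunk = (total + MAX_SG - 1) / MAX_SG;

	while (total >= min_size) {
		for (size_t chunk = total; chunk >= min_chunk; chunk /= 2)
			if (try_alloc(total, chunk) == 0) {
				printf("allocated %zu bytes\n", total);
				return 0;
			}
		total /= 2;
	}
	return -1;
}

int main(void)
{
	/* same shape as the new 4 MiB controller-log request */
	return alloc_buffer(4 * 1024 * 1024, 4 * 1024 * 1024 / MAX_SG) ? 1 : 0;
}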
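
Note also that the version/signature stamping moved out of buffer setup and into pqi_host_memory_update(): the DMA buffer stays generic until the update request is issued, at which point it is branded as an OFA or controller-log buffer according to the function code. A trimmed user-space sketch of that dispatch (the constant values below are placeholders; the real function codes and signature strings live in smartpqi.h):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* placeholder values standing in for the driver's constants */
#define OFA_MEMORY_UPDATE	1
#define CTRL_LOG_MEMORY_UPDATE	2
#define OFA_VERSION		1
#define CTRL_LOG_VERSION	1
#define OFA_SIGNATURE		"OFA_QRM"
#define CTRL_LOG_SIGNATURE	"FW_DATA"

/* trimmed model of struct pqi_host_memory's header fields */
struct host_memory {
	uint8_t signature[8];
	uint16_t version;
};

/* brand the shared buffer type per the update being requested */
static void stamp(struct host_memory *hm, uint16_t function_code)
{
	if (function_code == OFA_MEMORY_UPDATE) {
		hm->version = OFA_VERSION;
		memcpy(hm->signature, OFA_SIGNATURE, sizeof(hm->signature));
	} else if (function_code == CTRL_LOG_MEMORY_UPDATE) {
		hm->version = CTRL_LOG_VERSION;
		memcpy(hm->signature, CTRL_LOG_SIGNATURE, sizeof(hm->signature));
	}
}

int main(void)
{
	struct host_memory hm;

	stamp(&hm, CTRL_LOG_MEMORY_UPDATE);
	printf("signature=%.8s version=%u\n", (const char *)hm.signature, hm.version);
	return 0;
}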
