--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1028,13 +1028,13 @@ struct pqi_scsi_dev_raid_map_data {
u8 cdb_length;
/* RAID 1 specific */
-#define NUM_RAID1_MAP_ENTRIES 3
+#define NUM_RAID1_MAP_ENTRIES 3
u32 num_it_nexus_entries;
u32 it_nexus[NUM_RAID1_MAP_ENTRIES];
/* RAID 5 / RAID 6 specific */
- u32 p_parity_it_nexus; /* aio_handle */
- u32 q_parity_it_nexus; /* aio_handle */
+ u32 p_parity_it_nexus; /* aio_handle */
+ u32 q_parity_it_nexus; /* aio_handle */
u8 xor_mult;
u64 row;
u64 stripe_lba;
@@ -1044,6 +1044,7 @@ struct pqi_scsi_dev_raid_map_data {
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
@@ -1302,7 +1303,8 @@ struct pqi_ctrl_info {
u32 max_transfer_encrypted_sas_sata;
u32 max_transfer_encrypted_nvme;
u32 max_write_raid_5_6;
-
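+ /* per-request AIO bypass write limits, in bytes (~0 = no limit) */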
+ u32 max_write_raid_1_10_2drive;
+ u32 max_write_raid_1_10_3drive;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1533,6 +1535,8 @@ struct bmic_sense_feature_io_page_aio_subpage {
__le16 max_transfer_encrypted_sas_sata;
__le16 max_transfer_encrypted_nvme;
__le16 max_write_raid_5_6;
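+ /* RAID 1/10 write limits, reported in 1024-byte units (0 = no limit) */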
+ __le16 max_write_raid_1_10_2drive;
+ __le16 max_write_raid_1_10_3drive;
};
struct bmic_smp_request {
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -696,6 +696,19 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
return rc;
}
+static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
+{
+ u32 bytes;
+
+ bytes = get_unaligned_le16(limit);
+ if (bytes == 0)
+ bytes = ~0;
+ else
+ bytes *= 1024;
+
+ return bytes;
+}
+
#pragma pack(1)
struct bmic_sense_feature_buffer {
@@ -707,11 +720,11 @@ struct bmic_sense_feature_buffer {
#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
offsetofend(struct bmic_sense_feature_buffer, \
- aio_subpage.max_write_raid_5_6)
+ aio_subpage.max_write_raid_1_10_3drive)
#define MINIMUM_AIO_SUBPAGE_LENGTH \
(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
- max_write_raid_5_6) - \
+ max_write_raid_1_10_3drive) - \
sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
@@ -753,33 +766,28 @@ static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
MINIMUM_AIO_SUBPAGE_LENGTH) {
- rc = -EINVAL;
goto error;
}
ctrl_info->max_transfer_encrypted_sas_sata =
- get_unaligned_le16(
+ pqi_aio_limit_to_bytes(
&buffer->aio_subpage.max_transfer_encrypted_sas_sata);
- if (ctrl_info->max_transfer_encrypted_sas_sata)
- ctrl_info->max_transfer_encrypted_sas_sata *= 1024;
- else
- ctrl_info->max_transfer_encrypted_sas_sata = ~0;
ctrl_info->max_transfer_encrypted_nvme =
- get_unaligned_le16(
+ pqi_aio_limit_to_bytes(
&buffer->aio_subpage.max_transfer_encrypted_nvme);
- if (ctrl_info->max_transfer_encrypted_nvme)
- ctrl_info->max_transfer_encrypted_nvme *= 1024;
- else
- ctrl_info->max_transfer_encrypted_nvme = ~0;
ctrl_info->max_write_raid_5_6 =
- get_unaligned_le16(
+ pqi_aio_limit_to_bytes(
&buffer->aio_subpage.max_write_raid_5_6);
- if (ctrl_info->max_write_raid_5_6)
- ctrl_info->max_write_raid_5_6 *= 1024;
- else
- ctrl_info->max_write_raid_5_6 = ~0;
+
+ ctrl_info->max_write_raid_1_10_2drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_2drive);
+
+ ctrl_info->max_write_raid_1_10_3drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_3drive);
error:
kfree(buffer);
@@ -2387,9 +2395,13 @@ static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
case SA_RAID_0:
break;
case SA_RAID_1:
- fallthrough;
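+ /* RAID 1 writes must be enabled and no larger than the 2-drive limit */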
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
+ is_supported = false;
+ break;
case SA_RAID_ADM:
- if (rmd->is_write && !ctrl_info->enable_r1_writes)
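+ /* RAID ADM writes must be enabled and no larger than the 3-drive limit */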
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
is_supported = false;
break;
case SA_RAID_5:
@@ -8136,6 +8148,8 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
ctrl_info->max_transfer_encrypted_nvme =
PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
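+ /* assume no RAID 1/10 write limit until the AIO subpage reports one */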
+ ctrl_info->max_write_raid_1_10_2drive = ~0;
+ ctrl_info->max_write_raid_1_10_3drive = ~0;
return ctrl_info;
}
@@ -9565,7 +9579,7 @@ static void __attribute__((unused)) verify_structures(void)
page_length) != 2);
BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
- != 14);
+ != 18);
BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
header) != 0);
BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
@@ -9582,6 +9596,10 @@ static void __attribute__((unused)) verify_structures(void)
max_transfer_encrypted_nvme) != 10);
BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
max_write_raid_5_6) != 12);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_2drive) != 14);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_3drive) != 16);
BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);