@@ -544,6 +544,18 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
+config SCSI_MYLEX
+ tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
+ depends on PCI
+ help
+ This driver adds support for the Mylex DAC960, AcceleRAID, and
+ eXtremeRAID PCI RAID controllers. See the file
+ <file:Documentation/blockdev/README.DAC960> for further information
+ about this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mylex.
+
config VMWARE_PVSCSI
tristate "VMware PVSCSI driver support"
depends on PCI && SCSI && X86
@@ -110,6 +110,7 @@ obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
+obj-$(CONFIG_SCSI_MYLEX) += mylex.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
new file mode 100644
@@ -0,0 +1,6024 @@
+/*
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ * This program is free software; you may redistribute and/or modify it under
+ * the terms of the GNU General Public License Version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for complete details.
+ */
+
+
+#define DAC960_DriverName "Mylex"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/blkpg.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/raid_class.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <linux/uaccess.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "mylex.h"
+
+#define DAC960_MAILBOX_TIMEOUT 1000000
+
+
+static DEFINE_MUTEX(DAC960_mutex);
+static int DAC960_ControllerCount;
+
+static struct raid_template *mylex_v1_raid_template;
+static struct raid_template *mylex_v2_raid_template;
+
+/*
+ * Lookup table mapping a DAC960 V1 physical drive state to a printable
+ * name. The NULL name on the last entry terminates the scan.
+ */
+static struct DAC960_V1_DriveStateTbl {
+	DAC960_V1_DriveState_T state;
+	char *name;
+} DAC960_V1_DriveStateNames[] = {
+	{ DAC960_V1_Device_Dead, "Dead" },
+	{ DAC960_V1_Device_WriteOnly, "WriteOnly" },
+	{ DAC960_V1_Device_Online, "Online" },
+	{ DAC960_V1_Device_Critical, "Critical" },
+	{ DAC960_V1_Device_Standby, "Standby" },
+	{ DAC960_V1_Device_Offline, NULL },
+};
+
+/*
+ * DAC960_V1_DriveStateName - translate a V1 drive state into a string.
+ *
+ * Falls back to "Offline" for the explicit offline state (its table
+ * entry carries a NULL name) and "Unknown" for anything else.
+ */
+static char *DAC960_V1_DriveStateName(DAC960_V1_DriveState_T state)
+{
+	struct DAC960_V1_DriveStateTbl *entry;
+
+	for (entry = DAC960_V1_DriveStateNames; entry->name; entry++) {
+		if (entry->state == state)
+			return entry->name;
+	}
+	return (state == DAC960_V1_Device_Offline) ? "Offline" : "Unknown";
+}
+
+/*
+ * Lookup table mapping a DAC960 V1 RAID level to a printable name,
+ * terminated by a NULL name.
+ */
+static struct DAC960_V1_RAIDLevelTbl {
+	DAC960_V1_RAIDLevel_T level;
+	char *name;
+} DAC960_V1_RAIDLevelNames[] = {
+	{ DAC960_V1_RAID_Level0, "RAID0" },
+	{ DAC960_V1_RAID_Level1, "RAID1" },
+	{ DAC960_V1_RAID_Level3, "RAID3" },
+	{ DAC960_V1_RAID_Level5, "RAID5" },
+	{ DAC960_V1_RAID_Level6, "RAID6" },
+	{ DAC960_V1_RAID_JBOD, "JBOD" },
+	{ 0xff, NULL }
+};
+
+/*
+ * DAC960_V1_RAIDLevelName - translate a V1 RAID level into a string,
+ * or NULL if the level is not in the table.
+ */
+static char *DAC960_V1_RAIDLevelName(DAC960_V1_RAIDLevel_T level)
+{
+	struct DAC960_V1_RAIDLevelTbl *entry;
+
+	for (entry = DAC960_V1_RAIDLevelNames; entry->name; entry++) {
+		if (entry->level == level)
+			return entry->name;
+	}
+	return NULL;
+}
+
+/*
+ * Lookup table mapping a DAC960 V2 physical drive state to a printable
+ * name, terminated by a NULL name.
+ */
+static struct DAC960_V2_DriveStateTbl {
+	DAC960_V2_DriveState_T state;
+	char *name;
+} DAC960_V2_DriveStateNames[] = {
+	{ DAC960_V2_Device_Unconfigured, "Unconfigured" },
+	{ DAC960_V2_Device_Online, "Online" },
+	{ DAC960_V2_Device_Rebuild, "Rebuild" },
+	{ DAC960_V2_Device_Missing, "Missing" },
+	{ DAC960_V2_Device_SuspectedCritical, "SuspectedCritical" },
+	{ DAC960_V2_Device_Offline, "Offline" },
+	{ DAC960_V2_Device_Critical, "Critical" },
+	{ DAC960_V2_Device_SuspectedDead, "SuspectedDead" },
+	{ DAC960_V2_Device_CommandedOffline, "CommandedOffline" },
+	{ DAC960_V2_Device_Standby, "Standby" },
+	{ DAC960_V2_Device_InvalidState, NULL },
+};
+
+/*
+ * DAC960_V2_DriveStateName - translate a V2 drive state into a string,
+ * or NULL if the state is not in the table.
+ */
+static char *DAC960_V2_DriveStateName(DAC960_V2_DriveState_T state)
+{
+	struct DAC960_V2_DriveStateTbl *entry;
+
+	for (entry = DAC960_V2_DriveStateNames; entry->name; entry++) {
+		if (entry->state == state)
+			return entry->name;
+	}
+	return NULL;
+}
+
+/*
+ * Lookup table mapping a DAC960 V2 RAID level to a printable name,
+ * terminated by a NULL name.
+ */
+static struct DAC960_V2_RAIDLevelTbl {
+	DAC960_V2_RAIDLevel_T level;
+	char *name;
+} DAC960_V2_RAIDLevelNames[] = {
+	{ DAC960_V2_RAID_Level0, "RAID0" },
+	{ DAC960_V2_RAID_Level1, "RAID1" },
+	{ DAC960_V2_RAID_Level3, "RAID3 right asymmetric parity" },
+	{ DAC960_V2_RAID_Level5, "RAID5 right asymmetric parity" },
+	{ DAC960_V2_RAID_Level6, "RAID6" },
+	{ DAC960_V2_RAID_JBOD, "JBOD" },
+	{ DAC960_V2_RAID_NewSpan, "New Mylex SPAN" },
+	{ DAC960_V2_RAID_Level3F, "RAID3 fixed parity" },
+	{ DAC960_V2_RAID_Level3L, "RAID3 left symmetric parity" },
+	{ DAC960_V2_RAID_Span, "Mylex SPAN" },
+	{ DAC960_V2_RAID_Level5L, "RAID5 left symmetric parity" },
+	{ DAC960_V2_RAID_LevelE, "RAIDE (concatenation)" },
+	{ DAC960_V2_RAID_Physical, "Physical device" },
+	{ 0xff, NULL }
+};
+
+/*
+ * DAC960_V2_RAIDLevelName - translate a V2 RAID level into a string,
+ * or NULL if the level is not in the table.
+ */
+static char *DAC960_V2_RAIDLevelName(DAC960_V2_RAIDLevel_T level)
+{
+	struct DAC960_V2_RAIDLevelTbl *entry;
+
+	for (entry = DAC960_V2_RAIDLevelNames; entry->name; entry++) {
+		if (entry->level == level)
+			return entry->name;
+	}
+	return NULL;
+}
+
+/*
+ * DAC960_V2_ReportProgress - log the progress of a Logical Device
+ * Long Operation as a percentage.
+ * @c: controller
+ * @ldev_num: logical device number the operation runs on
+ * @msg: operation name to print (e.g. rebuild, consistency check)
+ * @blocks: number of blocks completed so far
+ * @size: total size of the operation in blocks
+ */
+static void DAC960_V2_ReportProgress(DAC960_Controller_T *c,
+		unsigned short ldev_num,
+		unsigned char *msg,
+		unsigned long blocks,
+		unsigned long size)
+{
+	/*
+	 * Both operands are scaled down by 128 to avoid overflowing the
+	 * multiplication; bail out for sizes below 128 blocks, which
+	 * would otherwise divide by zero.
+	 */
+	if ((size >> 7) == 0)
+		return;
+	shost_printk(KERN_INFO, c->host,
+		     "Logical Drive %d: %s in Progress: %ld%% completed\n",
+		     ldev_num, msg, (100 * (blocks >> 7)) / (size >> 7));
+}
+
+/*
+  init_dma_loaf() and slice_dma_loaf() are helper functions for
+  aggregating the dma-mapped memory for a well-known collection of
+  data structures that are of different lengths.
+
+  These routines don't guarantee any alignment. The caller must
+  include any space needed for alignment in the sizes of the structures
+  that are passed in.
+*/
+
+/*
+ * init_dma_loaf - allocate one zeroed DMA-coherent region of @len bytes
+ * and initialize the loaf bookkeeping to point at its start.
+ * Returns true on success, false if the allocation failed.
+ */
+static bool init_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf,
+			  size_t len)
+{
+	void *cpu_addr;
+	dma_addr_t dma_handle;
+
+	/* pci_zalloc_consistent() returns zeroed memory, so the
+	 * explicit memset() of the old code is no longer needed. */
+	cpu_addr = pci_zalloc_consistent(dev, len, &dma_handle);
+	if (cpu_addr == NULL)
+		return false;
+
+	loaf->cpu_free = loaf->cpu_base = cpu_addr;
+	loaf->dma_free = loaf->dma_base = dma_handle;
+	loaf->length = len;
+	return true;
+}
+
+/*
+ * slice_dma_loaf - carve the next @len bytes off the loaf, returning
+ * the CPU address and storing the matching bus address in *dma_handle.
+ * BUGs if the loaf would be overrun; no alignment is applied.
+ */
+static void *slice_dma_loaf(struct dma_loaf *loaf, size_t len,
+			    dma_addr_t *dma_handle)
+{
+	void *cpu_addr = loaf->cpu_free;
+
+	BUG_ON(cpu_addr + len > loaf->cpu_base + loaf->length);
+	*dma_handle = loaf->dma_free;
+	loaf->cpu_free += len;
+	loaf->dma_free += len;
+	return cpu_addr;
+}
+
+/*
+ * free_dma_loaf - release the coherent region backing a loaf.
+ * Safe to call on a loaf whose allocation failed (cpu_base == NULL).
+ */
+static void free_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf_handle)
+{
+	if (loaf_handle->cpu_base == NULL)
+		return;
+	pci_free_consistent(dev, loaf_handle->length,
+			    loaf_handle->cpu_base, loaf_handle->dma_base);
+}
+
+/*
+  DAC960_CreateAuxiliaryStructures allocates and initializes the auxiliary
+  data structures for Controller. It returns true on success and false on
+  failure.
+
+  For V1 firmware this creates the scatter/gather and DCDB pools; for V2
+  firmware it additionally creates a request-sense pool. On any failure
+  every pool created so far is destroyed again, so no partial state is
+  left behind.
+*/
+
+static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *c)
+{
+	struct pci_dev *pdev = c->PCIDevice;
+	struct pci_pool *ScatterGatherPool;
+	struct pci_pool *RequestSensePool = NULL;
+	struct pci_pool *DCDBPool = NULL;
+	size_t elem_size, elem_align;
+
+	if (c->FirmwareType == DAC960_V1_Controller) {
+		/* One pool element holds a full SG list: sg_tablesize
+		 * segments, aligned to the segment size. */
+		elem_align = sizeof(DAC960_V1_ScatterGatherSegment_T);
+		elem_size = c->host->sg_tablesize * elem_align;
+		ScatterGatherPool = pci_pool_create("DAC960_V1_ScatterGather",
+						    pdev, elem_size,
+						    elem_align, 0);
+		if (ScatterGatherPool == NULL) {
+			shost_printk(KERN_ERR, c->host,
+				     "Failed to allocate SG pool\n");
+			return false;
+		}
+		elem_size = sizeof(DAC960_V1_DCDB_T);
+		elem_align = sizeof(unsigned int);
+		DCDBPool = pci_pool_create("DAC960_V1_DCDB",
+					   pdev, elem_size, elem_align, 0);
+		if (!DCDBPool) {
+			/* Unwind the SG pool created above */
+			pci_pool_destroy(ScatterGatherPool);
+			shost_printk(KERN_ERR, c->host,
+				     "Failed to allocate DCDB pool\n");
+			return false;
+		}
+		/* Publish the pools only after everything succeeded */
+		c->ScatterGatherPool = ScatterGatherPool;
+		c->V1.DCDBPool = DCDBPool;
+	} else {
+		elem_align = sizeof(DAC960_V2_ScatterGatherSegment_T);
+		elem_size = c->host->sg_tablesize * elem_align;
+		ScatterGatherPool = pci_pool_create("DAC960_V2_ScatterGather",
+						    pdev, elem_size,
+						    elem_align, 0);
+		if (ScatterGatherPool == NULL) {
+			shost_printk(KERN_ERR, c->host,
+				     "Failed to allocate SG pool\n");
+			return false;
+		}
+		elem_size = DAC960_V2_SENSE_BUFFERSIZE;
+		elem_align = sizeof(int);
+		RequestSensePool = pci_pool_create("DAC960_V2_RequestSense",
+						   pdev, elem_size,
+						   elem_align, 0);
+		if (RequestSensePool == NULL) {
+			pci_pool_destroy(ScatterGatherPool);
+			shost_printk(KERN_ERR, c->host,
+				     "Failed to allocate sense data pool\n");
+			return false;
+		}
+		elem_size = DAC960_V2_DCDB_SIZE;
+		elem_align = sizeof(unsigned char);
+		DCDBPool = pci_pool_create("DAC960_V2_DCDB",
+					   pdev, elem_size, elem_align, 0);
+		if (!DCDBPool) {
+			/* Unwind both pools created above */
+			pci_pool_destroy(ScatterGatherPool);
+			pci_pool_destroy(RequestSensePool);
+			shost_printk(KERN_ERR, c->host,
+				     "Failed to allocate DCDB pool\n");
+			return false;
+		}
+		c->ScatterGatherPool = ScatterGatherPool;
+		c->V2.RequestSensePool = RequestSensePool;
+		c->V2.DCDBPool = DCDBPool;
+	}
+	return true;
+}
+
+
+/*
+  DAC960_DestroyAuxiliaryStructures deallocates the auxiliary data
+  structures for Controller. Counterpart of
+  DAC960_CreateAuxiliaryStructures(); safe to call with pools that were
+  never created.
+*/
+
+static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *c)
+{
+	/*
+	 * pci_pool_destroy() (dma_pool_destroy) is a no-op on a NULL
+	 * pool, so no explicit NULL checks are needed here.
+	 */
+	pci_pool_destroy(c->ScatterGatherPool);
+
+	if (c->FirmwareType == DAC960_V1_Controller) {
+		pci_pool_destroy(c->V1.DCDBPool);
+	} else {
+		pci_pool_destroy(c->V2.DCDBPool);
+		pci_pool_destroy(c->V2.RequestSensePool);
+	}
+}
+
+
+/*
+  DAC960_V1_ClearCommand clears critical fields of Command for DAC960 V1
+  Firmware Controllers: the whole mailbox is zeroed and the completion
+  status reset.
+*/
+
+static inline void DAC960_V1_ClearCommand(DAC960_V1_CommandBlock_T *cmd_blk)
+{
+	memset(&cmd_blk->mbox, 0, sizeof(cmd_blk->mbox));
+	cmd_blk->status = 0;
+}
+
+
+/*
+ DAC960_V2_ClearCommand clears critical fields of Command for DAC960 V2
+ Firmware Controllers.
+*/
+
+static inline void DAC960_V2_ClearCommand(DAC960_V2_CommandBlock_T *cmd_blk)
+{
+ DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+
+ memset(mbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
+ cmd_blk->status = 0;
+}
+
+
+/*
+ * DAC960_V2_QueueCommand queues Command for DAC960 V2 Series Controllers.
+ *
+ * Callers in this file hold c->queue_lock around this call. The mailbox
+ * is copied into the next slot of the circular command mailbox ring;
+ * the doorbell is rung only when one of the two previously written
+ * slots has already been consumed by the controller (Words[0] == 0) —
+ * presumably the controller picks up back-to-back slots on its own;
+ * TODO(review): confirm against the firmware interface documentation.
+ */
+static void DAC960_V2_QueueCommand(DAC960_Controller_T *c,
+		DAC960_V2_CommandBlock_T *cmd_blk)
+{
+	void __iomem *base = c->BaseAddress;
+	DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V2_CommandMailbox_T *next_mbox =
+		c->V2.NextCommandMailbox;
+
+	c->V2.WriteCommandMailbox(next_mbox, mbox);
+
+	if (c->V2.PreviousCommandMailbox1->Words[0] == 0 ||
+	    c->V2.PreviousCommandMailbox2->Words[0] == 0)
+		c->V2.MailboxNewCommand(base);
+
+	/* Remember the two most recently written slots for the check above */
+	c->V2.PreviousCommandMailbox2 =
+		c->V2.PreviousCommandMailbox1;
+	c->V2.PreviousCommandMailbox1 = next_mbox;
+
+	/* Advance to the next ring slot, wrapping at the end */
+	if (++next_mbox > c->V2.LastCommandMailbox)
+		next_mbox = c->V2.FirstCommandMailbox;
+
+	c->V2.NextCommandMailbox = next_mbox;
+}
+
+/*
+ * DAC960_V1_QueueCommand queues Command for DAC960 V1 Series Controllers.
+ *
+ * Same ring-mailbox scheme as DAC960_V2_QueueCommand(): callers hold
+ * c->queue_lock, the mailbox is written into the next ring slot, and
+ * the doorbell is rung only when one of the two previously written
+ * slots has been consumed (Words[0] == 0).
+ */
+
+static void DAC960_V1_QueueCommand(DAC960_Controller_T *c,
+		DAC960_V1_CommandBlock_T *cmd_blk)
+{
+	void __iomem *base = c->BaseAddress;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V1_CommandMailbox_T *next_mbox =
+		c->V1.NextCommandMailbox;
+
+	c->V1.WriteCommandMailbox(next_mbox, mbox);
+	if (c->V1.PreviousCommandMailbox1->Words[0] == 0 ||
+	    c->V1.PreviousCommandMailbox2->Words[0] == 0)
+		c->V1.MailboxNewCommand(base);
+	/* Remember the two most recently written slots for the check above */
+	c->V1.PreviousCommandMailbox2 =
+		c->V1.PreviousCommandMailbox1;
+	c->V1.PreviousCommandMailbox1 = next_mbox;
+	/* Advance to the next ring slot, wrapping at the end */
+	if (++next_mbox > c->V1.LastCommandMailbox)
+		next_mbox = c->V1.FirstCommandMailbox;
+	c->V1.NextCommandMailbox = next_mbox;
+}
+
+/*
+  DAC960_PD_QueueCommand queues Command for DAC960 PD Series Controllers.
+
+  PD series controllers expose a single mailbox slot rather than a ring,
+  so this busy-waits (in 1us steps) until the slot is free, writes the
+  mailbox and rings the doorbell.
+*/
+
+static void DAC960_PD_QueueCommand(DAC960_Controller_T *c,
+		DAC960_V1_CommandBlock_T *cmd_blk)
+{
+	void __iomem *base = c->BaseAddress;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+
+	/* Spin until the single mailbox slot is free */
+	while (DAC960_PD_MailboxFullP(base))
+		udelay(1);
+	DAC960_PD_WriteCommandMailbox(base, mbox);
+	DAC960_PD_NewCommand(base);
+}
+
+
+/*
+  DAC960_P_QueueCommand queues Command for DAC960 P Series Controllers.
+
+  P series firmware only understands the old-style opcodes, so the
+  opcode in the mailbox is rewritten to its _Old equivalent before
+  posting; the read/write variants additionally need their parameter
+  layout translated by DAC960_PD_To_P_TranslateReadWriteCommand().
+  Delivery then follows the single-slot scheme of
+  DAC960_PD_QueueCommand().
+*/
+
+static void DAC960_P_QueueCommand(DAC960_Controller_T *c,
+		DAC960_V1_CommandBlock_T *cmd_blk)
+{
+	void __iomem *base = c->BaseAddress;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+
+	switch (mbox->Common.opcode) {
+	case DAC960_V1_Enquiry:
+		mbox->Common.opcode = DAC960_V1_Enquiry_Old;
+		break;
+	case DAC960_V1_GetDeviceState:
+		mbox->Common.opcode = DAC960_V1_GetDeviceState_Old;
+		break;
+	case DAC960_V1_Read:
+		mbox->Common.opcode = DAC960_V1_Read_Old;
+		DAC960_PD_To_P_TranslateReadWriteCommand(cmd_blk);
+		break;
+	case DAC960_V1_Write:
+		mbox->Common.opcode = DAC960_V1_Write_Old;
+		DAC960_PD_To_P_TranslateReadWriteCommand(cmd_blk);
+		break;
+	case DAC960_V1_ReadWithScatterGather:
+		mbox->Common.opcode = DAC960_V1_ReadWithScatterGather_Old;
+		DAC960_PD_To_P_TranslateReadWriteCommand(cmd_blk);
+		break;
+	case DAC960_V1_WriteWithScatterGather:
+		mbox->Common.opcode = DAC960_V1_WriteWithScatterGather_Old;
+		DAC960_PD_To_P_TranslateReadWriteCommand(cmd_blk);
+		break;
+	default:
+		/* All other opcodes are passed through unchanged */
+		break;
+	}
+	/* Spin until the single mailbox slot is free */
+	while (DAC960_PD_MailboxFullP(base))
+		udelay(1);
+	DAC960_PD_WriteCommandMailbox(base, mbox);
+	DAC960_PD_NewCommand(base);
+}
+
+/*
+ * DAC960_V1_ExecuteCommand executes a V1 Command and waits for completion.
+ *
+ * Must be called from process context. The original code queued the
+ * command with cmd_blk->Completion pointing at the on-stack completion
+ * and then returned early when in_interrupt(); the IRQ handler would
+ * later complete() a stale stack address (use-after-return). Instead,
+ * refuse to attach the on-stack completion when we cannot sleep.
+ * NOTE(review): the completion path must tolerate a NULL
+ * cmd_blk->Completion — verify against the interrupt handler.
+ */
+
+static void DAC960_V1_ExecuteCommand(DAC960_Controller_T *c,
+		DAC960_V1_CommandBlock_T *cmd_blk)
+{
+	DECLARE_COMPLETION_ONSTACK(Completion);
+	unsigned long flags;
+
+	if (WARN_ON_ONCE(in_interrupt())) {
+		/* Fire-and-forget; do not leave a stack pointer behind */
+		cmd_blk->Completion = NULL;
+		spin_lock_irqsave(&c->queue_lock, flags);
+		c->V1.QueueCommand(c, cmd_blk);
+		spin_unlock_irqrestore(&c->queue_lock, flags);
+		return;
+	}
+
+	cmd_blk->Completion = &Completion;
+	spin_lock_irqsave(&c->queue_lock, flags);
+	c->V1.QueueCommand(c, cmd_blk);
+	spin_unlock_irqrestore(&c->queue_lock, flags);
+
+	wait_for_completion(&Completion);
+}
+
+/*
+ * DAC960_V2_ExecuteCommand executes a V2 Command and waits for completion.
+ * (The original comment said "V1 Command" — copy/paste error.)
+ *
+ * Must be called from process context. As in DAC960_V1_ExecuteCommand,
+ * the on-stack completion is only attached when we can actually sleep,
+ * so an early return can never leave the IRQ handler with a stale stack
+ * address. NOTE(review): the completion path must tolerate a NULL
+ * cmd_blk->Completion — verify against the interrupt handler.
+ */
+
+static void DAC960_V2_ExecuteCommand(DAC960_Controller_T *c,
+		DAC960_V2_CommandBlock_T *cmd_blk)
+{
+	DECLARE_COMPLETION_ONSTACK(Completion);
+	unsigned long flags;
+
+	if (WARN_ON_ONCE(in_interrupt())) {
+		/* Fire-and-forget; do not leave a stack pointer behind */
+		cmd_blk->Completion = NULL;
+		spin_lock_irqsave(&c->queue_lock, flags);
+		c->V2.QueueCommand(c, cmd_blk);
+		spin_unlock_irqrestore(&c->queue_lock, flags);
+		return;
+	}
+
+	cmd_blk->Completion = &Completion;
+	spin_lock_irqsave(&c->queue_lock, flags);
+	c->V2.QueueCommand(c, cmd_blk);
+	spin_unlock_irqrestore(&c->queue_lock, flags);
+
+	wait_for_completion(&Completion);
+}
+
+
+/*
+  DAC960_V1_ExecuteType3 executes a DAC960 V1 Firmware Controller Type 3
+  Command and waits for completion, serialized through the direct
+  command mutex. Returns the command status word.
+*/
+
+static unsigned short DAC960_V1_ExecuteType3(DAC960_Controller_T *c,
+		DAC960_V1_CommandOpcode_T op,
+		dma_addr_t DataDMA)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.DirectCommandBlock;
+	unsigned short ret;
+
+	mutex_lock(&c->V1.dcmd_mutex);
+	DAC960_V1_ClearCommand(cmd_blk);
+	cmd_blk->mbox.Type3.id = DAC960_DirectCommandIdentifier;
+	cmd_blk->mbox.Type3.opcode = op;
+	cmd_blk->mbox.Type3.BusAddress = DataDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	ret = cmd_blk->status;
+	mutex_unlock(&c->V1.dcmd_mutex);
+	return ret;
+}
+
+
+/*
+  DAC960_V1_ExecuteType3B executes a DAC960 V1 Firmware Controller
+  Type 3B Command and waits for completion, serialized through the
+  direct command mutex. Returns the command status word.
+  (The original comment misspelled the name as "ExecuteTypeB".)
+*/
+
+static unsigned short DAC960_V1_ExecuteType3B(DAC960_Controller_T *c,
+		DAC960_V1_CommandOpcode_T op,
+		unsigned char CommandOpcode2,
+		dma_addr_t DataDMA)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.DirectCommandBlock;
+	unsigned short ret;
+
+	mutex_lock(&c->V1.dcmd_mutex);
+	DAC960_V1_ClearCommand(cmd_blk);
+	cmd_blk->mbox.Type3B.id = DAC960_DirectCommandIdentifier;
+	cmd_blk->mbox.Type3B.opcode = op;
+	cmd_blk->mbox.Type3B.CommandOpcode2 = CommandOpcode2;
+	cmd_blk->mbox.Type3B.BusAddress = DataDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	ret = cmd_blk->status;
+	mutex_unlock(&c->V1.dcmd_mutex);
+	return ret;
+}
+
+
+/*
+  DAC960_V1_ExecuteType3D executes a DAC960 V1 Firmware Controller Type 3D
+  (per-physical-device) Command and waits for completion, serialized
+  through the direct command mutex.
+
+  On success the controller-written device state is copied into
+  sdev->hostdata, allocating the buffer on first use. On failure the
+  buffer is freed and detached from the sdev.
+  NOTE(review): on failure a PREVIOUSLY attached sdev->hostdata is also
+  freed and NULLed here — confirm no other context can still hold that
+  pointer while this runs.
+
+  Returns the command status, or DAC960_V1_OutOfMemory if the hostdata
+  buffer could not be allocated.
+*/
+
+static unsigned short DAC960_V1_ExecuteType3D(DAC960_Controller_T *c,
+		DAC960_V1_CommandOpcode_T op,
+		struct scsi_device *sdev)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.DirectCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V1_DeviceState_T *pdev_info = sdev->hostdata;
+	unsigned short status;
+
+	if (!pdev_info) {
+		/* First query for this device: allocate the state buffer */
+		pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
+		if (!pdev_info)
+			return DAC960_V1_OutOfMemory;
+	}
+	mutex_lock(&c->V1.dcmd_mutex);
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3D.id = DAC960_DirectCommandIdentifier;
+	mbox->Type3D.opcode = op;
+	mbox->Type3D.Channel = sdev->channel;
+	mbox->Type3D.TargetID = sdev->id;
+	/* The controller DMAs the result into the shared NewDeviceState
+	 * buffer, which is protected by dcmd_mutex. */
+	mbox->Type3D.BusAddress = c->V1.NewDeviceStateDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V1_NormalCompletion)
+		memcpy(pdev_info, c->V1.NewDeviceState, sizeof(*pdev_info));
+	else {
+		kfree(pdev_info);
+		pdev_info = NULL;
+	}
+	mutex_unlock(&c->V1.dcmd_mutex);
+
+	/* Attach a freshly allocated buffer, or detach a freed one */
+	if (!sdev->hostdata && pdev_info)
+		sdev->hostdata = pdev_info;
+	if (sdev->hostdata && !pdev_info)
+		sdev->hostdata = NULL;
+	return status;
+}
+
+
+/*
+  DAC960_V1_MonitorGetEventLog executes a DAC960 V1 Firmware Controller
+  Type 3E (Perform Event Log Operation) Command and waits for completion,
+  then decodes and logs the fetched event. Uses the monitoring command
+  block, so it must only be called from the monitoring context.
+  Returns the command status.
+  (The original comment named the function "DAC960_V1_GetEventLog".)
+*/
+
+static unsigned short DAC960_V1_MonitorGetEventLog(DAC960_Controller_T *c,
+		unsigned int event)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.MonitoringCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short status;
+	/* Messages indexed by the vendor-specific ASCQ (sense 9/80/xx) */
+	static char *DAC960_EventMessages[] =
+	{ "killed because write recovery failed",
+	  "killed because of SCSI bus reset failure",
+	  "killed because of double check condition",
+	  "killed because it was removed",
+	  "killed because of gross error on SCSI chip",
+	  "killed because of bad tag returned from drive",
+	  "killed because of timeout on SCSI command",
+	  "killed because of reset SCSI command issued from system",
+	  "killed because busy or parity error count exceeded limit",
+	  "killed because of 'kill drive' command from system",
+	  "killed because of selection timeout",
+	  "killed due to SCSI phase sequence error",
+	  "killed due to unknown status" };
+
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3E.id = DAC960_MonitoringIdentifier;
+	mbox->Type3E.opcode = DAC960_V1_PerformEventLogOperation;
+	mbox->Type3E.OperationType = DAC960_V1_GetEventLogEntry;
+	mbox->Type3E.OperationQualifier = 1;
+	mbox->Type3E.SequenceNumber = event;
+	mbox->Type3E.BusAddress = c->V1.EventLogEntryDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V1_NormalCompletion) {
+		DAC960_V1_EventLogEntry_T *EventLogEntry =
+			c->V1.EventLogEntry;
+		/* Only report the entry we actually asked for */
+		if (EventLogEntry->SequenceNumber == event) {
+			struct scsi_sense_hdr sshdr;
+
+			memset(&sshdr, 0, sizeof(sshdr));
+			scsi_normalize_sense(EventLogEntry->SenseData, 32,
+					     &sshdr);
+
+			/* Vendor-specific 80/xx codes carry the "killed
+			 * because ..." reason in the ASCQ */
+			if (sshdr.sense_key == VENDOR_SPECIFIC &&
+			    sshdr.asc == 0x80 &&
+			    sshdr.ascq < ARRAY_SIZE(DAC960_EventMessages)) {
+				shost_printk(KERN_CRIT, c->host,
+					     "Physical drive %d:%d: %s\n",
+					     EventLogEntry->Channel,
+					     EventLogEntry->TargetID,
+					     DAC960_EventMessages[sshdr.ascq]);
+			} else {
+				shost_printk(KERN_CRIT, c->host,
+					     "Physical drive %d:%d: "
+					     "Sense: %X/%02X/%02X\n",
+					     EventLogEntry->Channel,
+					     EventLogEntry->TargetID,
+					     sshdr.sense_key,
+					     sshdr.asc, sshdr.ascq);
+			}
+		}
+	} else
+		shost_printk(KERN_INFO, c->host,
+			     "Failed to get event log %d, status %04x\n",
+			     event, status);
+
+	return status;
+}
+
+/*
+  DAC960_V1_MonitorGetErrorTable executes a DAC960 V1 Firmware Controller
+  Type 3 (Get Error Table) Command and waits for completion. On success
+  it logs every physical device whose error counters changed since the
+  previous snapshot, then stores the new table as the reference copy.
+  (The original comment claimed a true/false return; the function
+  returns void.)
+*/
+
+static void DAC960_V1_MonitorGetErrorTable(DAC960_Controller_T *c)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.MonitoringCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short status;
+
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3.id = DAC960_MonitoringIdentifier;
+	mbox->Type3.opcode = DAC960_V1_GetErrorTable;
+	mbox->Type3.BusAddress = c->V1.NewErrorTableDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V1_NormalCompletion) {
+		DAC960_V1_ErrorTable_T *old_table = &c->V1.ErrorTable;
+		DAC960_V1_ErrorTable_T *new_table = c->V1.NewErrorTable;
+		DAC960_V1_ErrorTableEntry_T *new_entry, *old_entry;
+		struct scsi_device *sdev;
+
+		shost_for_each_device(sdev, c->host) {
+			/* Only physical devices have error-table entries */
+			if (sdev->channel >= c->PhysicalChannelCount)
+				continue;
+			new_entry =
+				&new_table->ErrorTableEntries[sdev->channel][sdev->id];
+			old_entry =
+				&old_table->ErrorTableEntries[sdev->channel][sdev->id];
+			/* Log only when any counter changed */
+			if ((new_entry->ParityErrorCount !=
+			     old_entry->ParityErrorCount) ||
+			    (new_entry->SoftErrorCount !=
+			     old_entry->SoftErrorCount) ||
+			    (new_entry->HardErrorCount !=
+			     old_entry->HardErrorCount) ||
+			    (new_entry->MiscErrorCount !=
+			     old_entry->MiscErrorCount))
+				sdev_printk(KERN_CRIT, sdev,
+					    "Errors: "
+					    "Parity = %d, Soft = %d, "
+					    "Hard = %d, Misc = %d\n",
+					    new_entry->ParityErrorCount,
+					    new_entry->SoftErrorCount,
+					    new_entry->HardErrorCount,
+					    new_entry->MiscErrorCount);
+		}
+		/* The new table becomes the reference for the next pass */
+		memcpy(&c->V1.ErrorTable, c->V1.NewErrorTable,
+		       sizeof(DAC960_V1_ErrorTable_T));
+	}
+}
+
+/*
+  DAC960_V1_GetLogicalDriveInfo executes a DAC960 V1 Firmware Controller
+  Type 3 (Get Logical Device Info) Command and waits for completion,
+  serialized through the direct command mutex. On success it walks all
+  logical drives, registers newly appeared Online drives with the SCSI
+  midlayer, logs state and write-mode changes, and stores the new info
+  as the reference copy. Returns the command status.
+
+  Fixes over the original:
+   - an inner, uninitialized 'ldev_num' shadowed the loop counter, so
+     scsi_device_lookup() was called with garbage;
+   - a 'break' aborted the scan after the first newly added drive;
+     the remaining drives are now still processed;
+   - the reference taken by scsi_device_lookup() is now dropped with
+     scsi_device_put().
+*/
+
+static unsigned short DAC960_V1_GetLogicalDriveInfo(DAC960_Controller_T *c)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.DirectCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short status;
+	int ldev_num;
+
+	mutex_lock(&c->V1.dcmd_mutex);
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3.id = DAC960_DirectCommandIdentifier;
+	mbox->Type3.opcode = DAC960_V1_GetLogicalDeviceInfo;
+	mbox->Type3.BusAddress = c->V1.LogicalDeviceInfoDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V1.dcmd_mutex);
+	if (status != DAC960_V1_NormalCompletion)
+		return status;
+
+	for (ldev_num = 0; ldev_num < c->LogicalDriveCount; ldev_num++) {
+		DAC960_V1_LogicalDeviceInfo_T *old = NULL;
+		DAC960_V1_LogicalDeviceInfo_T *new =
+			c->V1.LogicalDeviceInfo[ldev_num];
+		struct scsi_device *sdev;
+		DAC960_V1_DriveState_T old_state =
+			DAC960_V1_Device_Offline;
+
+		sdev = scsi_device_lookup(c->host,
+					  c->PhysicalChannelCount,
+					  ldev_num, 0);
+		if (sdev && sdev->hostdata)
+			old = sdev->hostdata;
+		else if (new->State == DAC960_V1_Device_Online) {
+			/* Drive appeared since the last scan */
+			shost_printk(KERN_INFO, c->host,
+				     "Logical Drive %d is now Online\n",
+				     ldev_num);
+			scsi_add_device(c->host,
+					c->PhysicalChannelCount,
+					ldev_num, 0);
+			if (sdev)
+				scsi_device_put(sdev);
+			continue;
+		}
+		if (old)
+			old_state = old->State;
+		if (new->State != old_state)
+			shost_printk(KERN_INFO, c->host,
+				     "Logical Drive %d is now %s\n",
+				     ldev_num,
+				     DAC960_V1_DriveStateName(new->State));
+		/* 'old' non-NULL implies sdev is valid here */
+		if (old && new->WriteBack != old->WriteBack)
+			sdev_printk(KERN_INFO, sdev,
+				    "Logical Drive is now %s\n",
+				    (new->WriteBack
+				     ? "WRITE BACK" : "WRITE THRU"));
+		if (old)
+			memcpy(old, new, sizeof(*new));
+		if (sdev)
+			scsi_device_put(sdev);
+	}
+	return status;
+}
+
+
+/*
+  DAC960_V1_MonitorRebuildProgress executes a DAC960 V1 Firmware
+  Controller Type 3 (Get Rebuild Progress) Command and waits for
+  completion, then logs the rebuild state of the affected logical drive
+  and remembers the status for edge detection on the next pass.
+*/
+
+static void DAC960_V1_MonitorRebuildProgress(DAC960_Controller_T *c)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.MonitoringCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short status;
+
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3.id = DAC960_MonitoringIdentifier;
+	mbox->Type3.opcode = DAC960_V1_GetRebuildProgress;
+	mbox->Type3.BusAddress = c->V1.RebuildProgressDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V1_NormalCompletion) {
+		unsigned int ldev_num =
+			c->V1.RebuildProgress->LogicalDriveNumber;
+		unsigned int LogicalDriveSize =
+			c->V1.RebuildProgress->LogicalDriveSize;
+		unsigned int BlocksCompleted =
+			LogicalDriveSize - c->V1.RebuildProgress->RemainingBlocks;
+		struct scsi_device *sdev;
+
+		/*
+		 * NOTE(review): scsi_device_lookup() can return NULL and
+		 * takes a reference that is never released; the
+		 * sdev_printk() calls below would dereference a NULL
+		 * sdev — verify.
+		 */
+		sdev = scsi_device_lookup(c->host,
+					  c->PhysicalChannelCount,
+					  ldev_num, 0);
+		/*
+		 * NOTE(review): unreachable — status was just checked to
+		 * be NormalCompletion above, so it can never equal
+		 * NoRebuildOrCheckInProgress here. This edge-detection
+		 * likely belongs outside the enclosing if.
+		 */
+		if (status == DAC960_V1_NoRebuildOrCheckInProgress &&
+		    c->V1.LastRebuildStatus == DAC960_V1_NormalCompletion)
+			status = DAC960_V1_RebuildSuccessful;
+		switch (status) {
+		case DAC960_V1_NormalCompletion:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild in Progress, "
+				    "%d%% completed\n",
+				    (100 * (BlocksCompleted >> 7))
+				    / (LogicalDriveSize >> 7));
+			break;
+		case DAC960_V1_RebuildFailed_LogicalDriveFailure:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Failed due to "
+				    "Logical Drive Failure\n");
+			break;
+		case DAC960_V1_RebuildFailed_BadBlocksOnOther:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Failed due to "
+				    "Bad Blocks on Other Drives\n");
+			break;
+		case DAC960_V1_RebuildFailed_NewDriveFailed:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Failed due to "
+				    "Failure of Drive Being Rebuilt\n");
+			break;
+		case DAC960_V1_NoRebuildOrCheckInProgress:
+			break;
+		case DAC960_V1_RebuildSuccessful:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Completed Successfully\n");
+			break;
+		case DAC960_V1_RebuildSuccessfullyTerminated:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Successfully Terminated\n");
+			break;
+		}
+		/* Remember for the successful-completion edge detection */
+		c->V1.LastRebuildStatus = status;
+	}
+}
+
+
+/*
+  DAC960_V1_ConsistencyCheckProgress executes a DAC960 V1 Firmware
+  Controller Type 3 (Rebuild Stat) Command and waits for completion,
+  then logs the consistency-check progress of the affected logical
+  drive.
+
+  Fixes over the original: scsi_device_lookup() can return NULL (the
+  result was dereferenced unconditionally) and takes a reference that
+  was never released.
+*/
+
+static void DAC960_V1_ConsistencyCheckProgress(DAC960_Controller_T *c)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.MonitoringCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short status;
+
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3.id = DAC960_MonitoringIdentifier;
+	mbox->Type3.opcode = DAC960_V1_RebuildStat;
+	mbox->Type3.BusAddress = c->V1.RebuildProgressDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V1_NormalCompletion) {
+		unsigned int ldev_num =
+			c->V1.RebuildProgress->LogicalDriveNumber;
+		unsigned int LogicalDriveSize =
+			c->V1.RebuildProgress->LogicalDriveSize;
+		unsigned int BlocksCompleted =
+			LogicalDriveSize - c->V1.RebuildProgress->RemainingBlocks;
+		struct scsi_device *sdev;
+
+		sdev = scsi_device_lookup(c->host, c->PhysicalChannelCount,
+					  ldev_num, 0);
+		if (!sdev)
+			return;
+		sdev_printk(KERN_INFO, sdev,
+			    "Consistency Check in Progress: %d%% completed\n",
+			    (100 * (BlocksCompleted >> 7))
+			    / (LogicalDriveSize >> 7));
+		scsi_device_put(sdev);
+	}
+}
+
+
+/*
+  DAC960_V1_BackgroundInitialization executes a DAC960 V1 Firmware
+  Controller Type 3B (Background Initialization Control, query opcode
+  0x20) Command and waits for completion, then logs background
+  initialization state transitions against the previous snapshot.
+*/
+
+static void DAC960_V1_BackgroundInitialization(DAC960_Controller_T *c)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.MonitoringCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V1_BackgroundInitializationStatus_T *bgi, *last_bgi;
+	struct scsi_device *sdev;
+	unsigned short status;
+
+	DAC960_V1_ClearCommand(cmd_blk);
+	/*
+	 * NOTE(review): this uses DAC960_DirectCommandIdentifier on the
+	 * MONITORING command block, while the other monitoring commands
+	 * use DAC960_MonitoringIdentifier — confirm which is intended.
+	 */
+	mbox->Type3B.id = DAC960_DirectCommandIdentifier;
+	mbox->Type3B.opcode = DAC960_V1_BackgroundInitializationControl;
+	mbox->Type3B.CommandOpcode2 = 0x20;
+	mbox->Type3B.BusAddress = c->V1.BackgroundInitializationStatusDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	bgi = c->V1.BackgroundInitializationStatus;
+	last_bgi = &c->V1.LastBackgroundInitializationStatus;
+	/*
+	 * NOTE(review): scsi_device_lookup() can return NULL and takes a
+	 * reference that is never released; the sdev_printk() calls
+	 * below would dereference a NULL sdev — verify.
+	 */
+	sdev = scsi_device_lookup(c->host, c->PhysicalChannelCount,
+				  bgi->LogicalDriveNumber, 0);
+	switch (status) {
+	case DAC960_V1_NormalCompletion:
+		switch (bgi->Status) {
+		case DAC960_V1_BackgroundInitializationInvalid:
+			break;
+		case DAC960_V1_BackgroundInitializationStarted:
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Started\n");
+			break;
+		case DAC960_V1_BackgroundInitializationInProgress:
+			/* Only log when progress actually advanced or the
+			 * target drive changed */
+			if (bgi->BlocksCompleted ==
+			    last_bgi->BlocksCompleted &&
+			    bgi->LogicalDriveNumber ==
+			    last_bgi->LogicalDriveNumber)
+				break;
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization in Progress: "
+				    "%d%% completed\n",
+				    (100 * (bgi->BlocksCompleted >> 7))
+				    / (bgi->LogicalDriveSize >> 7));
+			break;
+		case DAC960_V1_BackgroundInitializationSuspended:
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Suspended\n");
+			break;
+		case DAC960_V1_BackgroundInitializationCancelled:
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Cancelled\n");
+			break;
+		}
+		/* Snapshot for edge detection on the next pass */
+		memcpy(&c->V1.LastBackgroundInitializationStatus,
+		       c->V1.BackgroundInitializationStatus,
+		       sizeof(DAC960_V1_BackgroundInitializationStatus_T));
+		break;
+	case DAC960_V1_BackgroundInitSuccessful:
+		if (bgi->Status ==
+		    DAC960_V1_BackgroundInitializationInProgress)
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization "
+				    "Completed Successfully\n");
+		bgi->Status = DAC960_V1_BackgroundInitializationInvalid;
+		break;
+	case DAC960_V1_BackgroundInitAborted:
+		if (bgi->Status ==
+		    DAC960_V1_BackgroundInitializationInProgress)
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Aborted\n");
+		bgi->Status = DAC960_V1_BackgroundInitializationInvalid;
+		break;
+	case DAC960_V1_NoBackgroundInitInProgress:
+		break;
+	}
+}
+
+/*
+  DAC960_V1_NewEnquiry executes a DAC960 V1 Firmware Controller Enquiry
+  Command and waits for completion, then updates the cached controller
+  state (logical drive counts, event log sequence numbers, rebuild and
+  monitoring flags) from the result. Returns the command status.
+*/
+
+static unsigned short DAC960_V1_NewEnquiry(DAC960_Controller_T *c)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.DirectCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short status;
+
+	/* dcmd_mutex serializes use of the single direct command slot */
+	mutex_lock(&c->V1.dcmd_mutex);
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3.id = DAC960_DirectCommandIdentifier;
+	mbox->Type3.opcode = DAC960_V1_Enquiry;
+	/* Enquiry data is DMAed into the V1.NewEnquiry buffer */
+	mbox->Type3.BusAddress = c->V1.NewEnquiryDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V1.dcmd_mutex);
+	if (status == DAC960_V1_NormalCompletion) {
+		DAC960_V1_Enquiry_T *old = &c->V1.Enquiry;
+		DAC960_V1_Enquiry_T *new = c->V1.NewEnquiry;
+		/* Log logical drives that appeared since the last poll */
+		if (new->NumberOfLogicalDrives > c->LogicalDriveCount) {
+			int ldev_num = c->LogicalDriveCount - 1;
+			while (++ldev_num < new->NumberOfLogicalDrives)
+				shost_printk(KERN_CRIT, c->host,
+					     "Logical Drive %d Now Exists\n",
+					     ldev_num);
+			c->LogicalDriveCount = new->NumberOfLogicalDrives;
+		}
+		/* ... and drives that disappeared */
+		if (new->NumberOfLogicalDrives < c->LogicalDriveCount) {
+			int ldev_num = new->NumberOfLogicalDrives - 1;
+			while (++ldev_num < c->LogicalDriveCount)
+				shost_printk(KERN_CRIT, c->host,
+					     "Logical Drive %d No Longer Exists\n",
+					     ldev_num);
+			c->LogicalDriveCount = new->NumberOfLogicalDrives;
+		}
+		if (new->StatusFlags.DeferredWriteError !=
+		    old->StatusFlags.DeferredWriteError)
+			shost_printk(KERN_CRIT, c->host,
+				     "Deferred Write Error Flag is now %s\n",
+				     (new->StatusFlags.DeferredWriteError
+				      ? "TRUE" : "FALSE"));
+		/* A changed sequence number means new event log entries;
+		   schedule the error table fetch on the next monitor pass */
+		if (new->EventLogSequenceNumber !=
+		    old->EventLogSequenceNumber) {
+			c->V1.NewEventLogSequenceNumber =
+				new->EventLogSequenceNumber;
+			c->V1.NeedErrorTableInformation = true;
+			shost_printk(KERN_INFO, c->host,
+				     "Event log %d/%d (%d/%d) available\n",
+				     c->V1.OldEventLogSequenceNumber,
+				     c->V1.NewEventLogSequenceNumber,
+				     old->EventLogSequenceNumber,
+				     new->EventLogSequenceNumber);
+		}
+		/* Any change in critical/offline/total drive counts triggers
+		   a logical device information refresh */
+		if ((new->CriticalLogicalDriveCount > 0 ||
+		     new->CriticalLogicalDriveCount !=
+		     old->CriticalLogicalDriveCount) ||
+		    (new->OfflineLogicalDriveCount > 0 ||
+		     new->OfflineLogicalDriveCount !=
+		     old->OfflineLogicalDriveCount) ||
+		    (new->NumberOfLogicalDrives !=
+		     old->NumberOfLogicalDrives)) {
+			shost_printk(KERN_INFO, c->host,
+				     "Logical drive count changed (%d/%d/%d)\n",
+				     new->CriticalLogicalDriveCount,
+				     new->OfflineLogicalDriveCount,
+				     new->NumberOfLogicalDrives);
+			c->V1.NeedLogicalDeviceInfo = true;
+		}
+		/* Periodically (or on dead drive changes) re-check
+		   background initialization status if supported */
+		if ((new->DeadDriveCount > 0 ||
+		     new->DeadDriveCount != old->DeadDriveCount) ||
+		    time_after_eq(jiffies, c->SecondaryMonitoringTime
+				  + DAC960_SecondaryMonitoringInterval)) {
+			c->V1.NeedBackgroundInitializationStatus =
+				c->V1.BackgroundInitializationStatusSupported;
+			c->SecondaryMonitoringTime = jiffies;
+		}
+		/* Track rebuild progress while a rebuild is (or was) active */
+		if (new->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
+		    new->RebuildFlag
+		    == DAC960_V1_BackgroundRebuildInProgress ||
+		    old->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
+		    old->RebuildFlag == DAC960_V1_BackgroundRebuildInProgress) {
+			c->V1.NeedRebuildProgress = true;
+			c->V1.RebuildProgressFirst =
+				(new->CriticalLogicalDriveCount <
+				 old->CriticalLogicalDriveCount);
+		}
+		/* Report the outcome of a consistency check that was running
+		   during the previous poll */
+		if (old->RebuildFlag == DAC960_V1_BackgroundCheckInProgress)
+			switch (new->RebuildFlag) {
+			case DAC960_V1_NoStandbyRebuildOrCheckInProgress:
+				shost_printk(KERN_INFO, c->host,
+					     "Consistency Check Completed Successfully\n");
+				break;
+			case DAC960_V1_StandbyRebuildInProgress:
+			case DAC960_V1_BackgroundRebuildInProgress:
+				break;
+			case DAC960_V1_BackgroundCheckInProgress:
+				c->V1.NeedConsistencyCheckProgress = true;
+				break;
+			case DAC960_V1_StandbyRebuildCompletedWithError:
+				shost_printk(KERN_INFO, c->host,
+					     "Consistency Check Completed with Error\n");
+				break;
+			case DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed:
+				shost_printk(KERN_INFO, c->host,
+					     "Consistency Check Failed - "
+					     "Physical Device Failed\n");
+				break;
+			case DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed:
+				shost_printk(KERN_INFO, c->host,
+					     "Consistency Check Failed - "
+					     "Logical Drive Failed\n");
+				break;
+			case DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses:
+				shost_printk(KERN_INFO, c->host,
+					     "Consistency Check Failed - Other Causes\n");
+				break;
+			case DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated:
+				shost_printk(KERN_INFO, c->host,
+					     "Consistency Check Successfully Terminated\n");
+				break;
+			}
+		else if (new->RebuildFlag
+			 == DAC960_V1_BackgroundCheckInProgress)
+			c->V1.NeedConsistencyCheckProgress = true;
+		/* RebuildFlag values above BackgroundCheckInProgress encode
+		   completion codes; latch them for later reporting */
+		if (new->RebuildFlag > DAC960_V1_BackgroundCheckInProgress) {
+			c->V1.PendingRebuildFlag = new->RebuildFlag;
+			c->V1.RebuildFlagPending = true;
+		}
+		/* Make the fresh Enquiry data the new baseline */
+		memcpy(old, new, sizeof(DAC960_V1_Enquiry_T));
+	}
+	return status;
+}
+
+/*
+ DAC960_V1_SetDeviceState sets the Device State for a Physical Device for
+ DAC960 V1 Firmware Controllers.
+*/
+
+static unsigned short DAC960_V1_SetDeviceState(DAC960_Controller_T *c,
+					       struct scsi_device *sdev,
+					       DAC960_V1_DriveState_T State)
+{
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.DirectCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short status;
+
+	mutex_lock(&c->V1.dcmd_mutex);
+	/*
+	 * Clear the shared direct command block before filling it in,
+	 * matching DAC960_V1_NewEnquiry(); otherwise stale fields from a
+	 * previous direct command could leak into this mailbox.
+	 */
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3D.opcode = DAC960_V1_StartDevice;
+	mbox->Type3D.id = DAC960_DirectCommandIdentifier;
+	mbox->Type3D.Channel = sdev->channel;
+	mbox->Type3D.TargetID = sdev->id;
+	/* The Type3D state field is only 5 bits wide */
+	mbox->Type3D.State = State & 0x1F;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V1.dcmd_mutex);
+
+	return status;
+}
+
+/*
+  DAC960_V2_NewControllerInfo executes a DAC960 V2 Firmware Controller
+  Information Reading IOCTL Command and waits for completion. It returns
+  the V2 command status.
+
+  Data is returned in the controller's V2.NewControllerInformation dma-able
+  memory buffer.
+*/
+
+static unsigned char DAC960_V2_NewControllerInfo(DAC960_Controller_T *c)
+{
+	DAC960_V2_CommandBlock_T *cmd_blk = &c->V2.DirectCommandBlock;
+	DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V2_DataTransferMemoryAddress_T *dma_addr;
+	unsigned char status;
+
+	/* dcmd_mutex serializes use of the single direct command slot */
+	mutex_lock(&c->V2.dcmd_mutex);
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox->ControllerInfo.id = DAC960_DirectCommandIdentifier;
+	mbox->ControllerInfo.opcode = DAC960_V2_IOCTL;
+	mbox->ControllerInfo.control.DataTransferControllerToHost = true;
+	mbox->ControllerInfo.control.NoAutoRequestSense = true;
+	mbox->ControllerInfo.dma_size =
+		sizeof(DAC960_V2_ControllerInfo_T);
+	mbox->ControllerInfo.ControllerNumber = 0;
+	mbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
+	/* Single scatter/gather segment pointing at the pre-allocated
+	   controller information DMA buffer */
+	dma_addr = &mbox->ControllerInfo.dma_addr;
+	dma_addr->ScatterGatherSegments[0].SegmentDataPointer =
+		c->V2.NewControllerInformationDMA;
+	dma_addr->ScatterGatherSegments[0].SegmentByteCount =
+		mbox->ControllerInfo.dma_size;
+	dev_dbg(&c->host->shost_gendev,
+		"Sending GetControllerInfo\n");
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V2.dcmd_mutex);
+	if (status == DAC960_V2_NormalCompletion) {
+		DAC960_V2_ControllerInfo_T *new =
+			c->V2.NewControllerInformation;
+		DAC960_V2_ControllerInfo_T *old =
+			&c->V2.ControllerInformation;
+		/* While any background operation is active, keep polling
+		   controller information on the next monitoring pass */
+		if (new->BackgroundInitializationsActive +
+		    new->LogicalDeviceInitializationsActive +
+		    new->PhysicalDeviceInitializationsActive +
+		    new->ConsistencyChecksActive +
+		    new->RebuildsActive +
+		    new->OnlineExpansionsActive != 0)
+			c->V2.NeedControllerInformation = true;
+		if (new->LogicalDevicesPresent != old->LogicalDevicesPresent ||
+		    new->LogicalDevicesCritical != old->LogicalDevicesCritical ||
+		    new->LogicalDevicesOffline != old->LogicalDevicesOffline)
+			shost_printk(KERN_INFO, c->host,
+				     "Logical drive count changes (%d/%d/%d)\n",
+				     new->LogicalDevicesCritical,
+				     new->LogicalDevicesOffline,
+				     new->LogicalDevicesPresent);
+		c->LogicalDriveCount = new->LogicalDevicesPresent;
+		/* Make the fresh data the new cached baseline */
+		memcpy(old, new,
+		       sizeof(DAC960_V2_ControllerInfo_T));
+	}
+
+	return status;
+}
+
+
+/*
+  DAC960_V2_NewLogicalDeviceInfo executes a DAC960 V2 Firmware Controller
+  Logical Device Information Reading IOCTL Command and waits for completion.
+  It returns the V2 command status.
+
+  Data is returned in the controller's V2.NewLogicalDeviceInformation
+  dma-able memory buffer, and copied into *ldev_info on success.
+*/
+
+static unsigned char
+DAC960_V2_NewLogicalDeviceInfo(DAC960_Controller_T *c,
+			       unsigned short ldev_num,
+			       DAC960_V2_LogicalDeviceInfo_T *ldev_info)
+{
+	DAC960_V2_CommandBlock_T *cmd_blk = &c->V2.DirectCommandBlock;
+	DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V2_DataTransferMemoryAddress_T *dma_addr;
+	unsigned char status;
+
+	mutex_lock(&c->V2.dcmd_mutex);
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox->LogicalDeviceInfo.id = DAC960_DirectCommandIdentifier;
+	mbox->LogicalDeviceInfo.opcode = DAC960_V2_IOCTL;
+	mbox->LogicalDeviceInfo.control.DataTransferControllerToHost = true;
+	mbox->LogicalDeviceInfo.control.NoAutoRequestSense = true;
+	mbox->LogicalDeviceInfo.dma_size =
+		sizeof(DAC960_V2_LogicalDeviceInfo_T);
+	mbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber = ldev_num;
+	mbox->LogicalDeviceInfo.IOCTL_Opcode =
+		DAC960_V2_GetLogicalDeviceInfoValid;
+	dma_addr = &mbox->LogicalDeviceInfo.dma_addr;
+	dma_addr->ScatterGatherSegments[0].SegmentDataPointer =
+		c->V2.NewLogicalDeviceInformationDMA;
+	dma_addr->ScatterGatherSegments[0].SegmentByteCount =
+		mbox->LogicalDeviceInfo.dma_size;
+	dev_dbg(&c->host->shost_gendev,
+		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V2_NormalCompletion) {
+		DAC960_V2_LogicalDeviceInfo_T *new =
+			c->V2.NewLogicalDeviceInformation;
+		DAC960_V2_LogicalDeviceInfo_T *old = ldev_info;
+
+		/*
+		 * Fix: the original dereferenced ldev_info->LogicalDeviceNumber
+		 * into a local that shadowed the ldev_num parameter BEFORE the
+		 * NULL check below, crashing if ldev_info is NULL.  The
+		 * function parameter already holds the requested device
+		 * number, so the shadowing local is simply dropped.
+		 */
+		if (old != NULL) {
+			unsigned long ldev_size =
+				new->ConfigurableDeviceSize;
+
+			/* Log any drive state transition */
+			if (new->State != old->State) {
+				const char *name;
+
+				name = DAC960_V2_DriveStateName(new->State);
+				shost_printk(KERN_INFO, c->host,
+					     "Logical Drive %d is now %s\n",
+					     ldev_num, name ? name : "Invalid");
+			}
+			if ((new->SoftErrors != old->SoftErrors) ||
+			    (new->CommandsFailed != old->CommandsFailed) ||
+			    (new->DeferredWriteErrors !=
+			     old->DeferredWriteErrors))
+				shost_printk(KERN_INFO, c->host,
+					     "Logical Drive %d Errors: "
+					     "Soft = %d, Failed = %d, Deferred Write = %d\n",
+					     ldev_num,
+					     new->SoftErrors,
+					     new->CommandsFailed,
+					     new->DeferredWriteErrors);
+			/* Report progress of whichever background operation
+			   is currently running on this logical drive */
+			if (new->BackgroundInitializationInProgress)
+				DAC960_V2_ReportProgress(c, ldev_num,
+					"Background Initialization",
+					new->BackgroundInitializationBlockNumber,
+					ldev_size);
+			else if (new->ForegroundInitializationInProgress)
+				DAC960_V2_ReportProgress(c, ldev_num,
+					"Foreground Initialization",
+					new->ForegroundInitializationBlockNumber,
+					ldev_size);
+			else if (new->DataMigrationInProgress)
+				DAC960_V2_ReportProgress(c, ldev_num,
+					"Data Migration",
+					new->DataMigrationBlockNumber,
+					ldev_size);
+			else if (new->PatrolOperationInProgress)
+				DAC960_V2_ReportProgress(c, ldev_num,
+					"Patrol Operation",
+					new->PatrolOperationBlockNumber,
+					ldev_size);
+			/* Background initialization just finished; report
+			   whether it completed or failed */
+			if (old->BackgroundInitializationInProgress &&
+			    !new->BackgroundInitializationInProgress)
+				shost_printk(KERN_INFO, c->host,
+					     "Logical Drive %d: "
+					     "Background Initialization %s\n",
+					     ldev_num,
+					     (new->LogicalDeviceControl
+					      .LogicalDeviceInitialized
+					      ? "Completed" : "Failed"));
+			memcpy(ldev_info, c->V2.NewLogicalDeviceInformation,
+			       sizeof(*ldev_info));
+		}
+	}
+	mutex_unlock(&c->V2.dcmd_mutex);
+	return status;
+}
+
+
+/*
+  DAC960_V2_NewPhysicalDeviceInfo executes a DAC960 V2 Firmware Controller
+  "Read Physical Device Information" IOCTL Command and waits for completion.
+  It returns the V2 command status.
+
+  The Channel, TargetID, LogicalUnit arguments should be 0 the first time
+  this function is called for a given controller. This will return data
+  for the "first" device on that controller. The returned data includes a
+  Channel, TargetID, LogicalUnit that can be passed in to this routine to
+  get data for the NEXT device on that controller.
+
+  Data is stored in the controller's V2.NewPhysicalDeviceInfo dma-able
+  memory buffer, and copied into *pdev_info on success.
+
+*/
+
+static unsigned char
+DAC960_V2_NewPhysicalDeviceInfo(DAC960_Controller_T *c,
+				unsigned char Channel,
+				unsigned char TargetID,
+				unsigned char LogicalUnit,
+				DAC960_V2_PhysicalDeviceInfo_T *pdev_info)
+{
+	DAC960_V2_CommandBlock_T *cmd_blk = &c->V2.DirectCommandBlock;
+	DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V2_DataTransferMemoryAddress_T *dma_addr;
+	unsigned char status;
+
+	mutex_lock(&c->V2.dcmd_mutex);
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox->PhysicalDeviceInfo.opcode = DAC960_V2_IOCTL;
+	mbox->PhysicalDeviceInfo.id = DAC960_DirectCommandIdentifier;
+	mbox->PhysicalDeviceInfo.control.DataTransferControllerToHost = true;
+	mbox->PhysicalDeviceInfo.control.NoAutoRequestSense = true;
+	mbox->PhysicalDeviceInfo.dma_size =
+		sizeof(DAC960_V2_PhysicalDeviceInfo_T);
+	mbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = LogicalUnit;
+	mbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
+	mbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
+	mbox->PhysicalDeviceInfo.IOCTL_Opcode =
+		DAC960_V2_GetPhysicalDeviceInfoValid;
+	dma_addr = &mbox->PhysicalDeviceInfo.dma_addr;
+	dma_addr->ScatterGatherSegments[0].SegmentDataPointer =
+		c->V2.NewPhysicalDeviceInformationDMA;
+	dma_addr->ScatterGatherSegments[0].SegmentByteCount =
+		mbox->PhysicalDeviceInfo.dma_size;
+	dev_dbg(&c->host->shost_gendev,
+		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
+		Channel, TargetID, LogicalUnit);
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	/*
+	 * Fix: NewPhysicalDeviceInformation is a pointer to the DMA buffer
+	 * (see DAC960_V2_EnableMemoryMailboxInterface), so the original
+	 * "&c->V2.NewPhysicalDeviceInformation" copied the pointer variable
+	 * itself plus adjacent structure memory instead of the device data.
+	 * Compare the correct usage in DAC960_V2_NewLogicalDeviceInfo.
+	 */
+	if (status == DAC960_V2_NormalCompletion)
+		memcpy(pdev_info, c->V2.NewPhysicalDeviceInformation,
+		       sizeof(*pdev_info));
+	mutex_unlock(&c->V2.dcmd_mutex);
+	return status;
+}
+
+/*
+  DAC960_V2_DeviceOperation executes a DAC960 V2 Firmware Controller Device
+  Operation IOCTL Command and waits for completion. It returns the V2
+  command status.
+*/
+
+static unsigned char
+DAC960_V2_DeviceOperation(DAC960_Controller_T *c,
+			  DAC960_V2_IOCTL_Opcode_T opcode,
+			  DAC960_V2_OperationDevice_T opdev)
+{
+	DAC960_V2_CommandBlock_T *cmd_blk;
+	DAC960_V2_CommandMailbox_T *mbox;
+	unsigned char status;
+
+	/* The direct command slot is shared; serialize access to it */
+	mutex_lock(&c->V2.dcmd_mutex);
+	cmd_blk = &c->V2.DirectCommandBlock;
+	mbox = &cmd_blk->mbox;
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox->DeviceOperation.id = DAC960_DirectCommandIdentifier;
+	mbox->DeviceOperation.opcode = DAC960_V2_IOCTL;
+	mbox->DeviceOperation.IOCTL_Opcode = opcode;
+	mbox->DeviceOperation.OperationDevice = opdev;
+	mbox->DeviceOperation.control.DataTransferControllerToHost = true;
+	mbox->DeviceOperation.control.NoAutoRequestSense = true;
+	/* Issue the operation and wait for the firmware to complete it */
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V2.dcmd_mutex);
+
+	return status;
+}
+
+
+/*
+ DAC960_V2_TranslatePhysicalDevice translates a Physical Device Channel and
+ TargetID into a Logical Device.
+*/
+
+static unsigned char
+DAC960_V2_TranslatePhysicalDevice(DAC960_Controller_T *c,
+				  unsigned char Channel,
+				  unsigned char TargetID,
+				  unsigned char LogicalUnit,
+				  unsigned short *ldev_num)
+{
+	DAC960_V2_CommandBlock_T *cmd_blk;
+	DAC960_V2_CommandMailbox_T *mbox;
+	DAC960_V2_DataTransferMemoryAddress_T *dma_addr;
+	unsigned char status;
+
+	mutex_lock(&c->V2.dcmd_mutex);
+	cmd_blk = &c->V2.DirectCommandBlock;
+	mbox = &cmd_blk->mbox;
+	/*
+	 * Fix: clear the shared direct command block and set the command
+	 * identifier, as every other user of DirectCommandBlock does;
+	 * otherwise stale fields from the previous direct command survive
+	 * in the mailbox.
+	 */
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox->PhysicalDeviceInfo.opcode = DAC960_V2_IOCTL;
+	mbox->PhysicalDeviceInfo.id = DAC960_DirectCommandIdentifier;
+	mbox->PhysicalDeviceInfo.control.DataTransferControllerToHost = true;
+	mbox->PhysicalDeviceInfo.control.NoAutoRequestSense = true;
+	mbox->PhysicalDeviceInfo.dma_size =
+		sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
+	mbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
+	mbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
+	mbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = LogicalUnit;
+	mbox->PhysicalDeviceInfo.IOCTL_Opcode =
+		DAC960_V2_TranslatePhysicalToLogicalDevice;
+	dma_addr = &mbox->PhysicalDeviceInfo.dma_addr;
+	dma_addr->ScatterGatherSegments[0].SegmentDataPointer =
+		c->V2.PhysicalToLogicalDeviceDMA;
+	dma_addr->ScatterGatherSegments[0].SegmentByteCount =
+		mbox->PhysicalDeviceInfo.dma_size;
+
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V2.dcmd_mutex);
+	if (status == DAC960_V2_NormalCompletion)
+		*ldev_num = c->V2.PhysicalToLogicalDevice->LogicalDeviceNumber;
+
+	return status;
+}
+
+
+/*
+  DAC960_V2_MonitorGetEvent fetches the next controller event (sequence
+  number V2.NextEventSequenceNumber) into the V2.Event DMA buffer using the
+  monitoring command block, and returns the V2 command status.
+
+  NOTE(review): unlike the direct-command helpers, no ClearCommand, command
+  id, or control flags are set here — presumably the monitoring command
+  block is initialized once elsewhere; confirm against the setup code.
+*/
+static unsigned char DAC960_V2_MonitorGetEvent(DAC960_Controller_T *c)
+{
+	DAC960_V2_CommandBlock_T *cmd_blk = &c->V2.MonitoringCommandBlock;
+	DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	DAC960_V2_DataTransferMemoryAddress_T *dma_addr;
+	unsigned char status;
+
+	mbox->GetEvent.opcode = DAC960_V2_IOCTL;
+	mbox->GetEvent.dma_size = sizeof(DAC960_V2_Event_T);
+	/* The 32-bit event sequence number is split across two 16-bit
+	   mailbox fields */
+	mbox->GetEvent.EventSequenceNumberHigh16 =
+		c->V2.NextEventSequenceNumber >> 16;
+	mbox->GetEvent.ControllerNumber = 0;
+	mbox->GetEvent.IOCTL_Opcode = DAC960_V2_GetEvent;
+	mbox->GetEvent.EventSequenceNumberLow16 =
+		c->V2.NextEventSequenceNumber & 0xFFFF;
+	dma_addr = &mbox->GetEvent.dma_addr;
+	dma_addr->ScatterGatherSegments[0].SegmentDataPointer =
+		c->V2.EventDMA;
+	dma_addr->ScatterGatherSegments[0].SegmentByteCount =
+		mbox->GetEvent.dma_size;
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+
+	return status;
+}
+
+/*
+ DAC960_V1_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
+ for DAC960 V1 Firmware Controllers.
+
+ PD and P controller types have no memory mailbox, but still need the
+ other dma mapped memory.
+*/
+
+static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T *c)
+{
+	void __iomem *base = c->BaseAddress;
+	DAC960_HardwareType_T hw_type = c->HardwareType;
+	struct pci_dev *pdev = c->PCIDevice;
+	struct dma_loaf *DmaPages = &c->DmaPages;
+	size_t DmaPagesSize;
+	size_t CommandMailboxesSize;
+	size_t StatusMailboxesSize;
+
+	DAC960_V1_CommandMailbox_T *CommandMailboxesMemory;
+	dma_addr_t CommandMailboxesMemoryDMA;
+
+	DAC960_V1_StatusMailbox_T *StatusMailboxesMemory;
+	dma_addr_t StatusMailboxesMemoryDMA;
+
+	DAC960_V1_CommandMailbox_T mbox;
+	unsigned short status;
+	int timeout = 0;
+	int i;
+
+	memset(&mbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
+
+	/* V1 controllers can only address 32-bit DMA */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		dev_err(&pdev->dev, "DMA mask out of range\n");
+		return false;
+	}
+	c->BounceBufferLimit = DMA_BIT_MASK(32);
+
+	/* PD and P controllers have no memory mailbox, but still need the
+	   other dma-mapped buffers */
+	if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) {
+		CommandMailboxesSize = 0;
+		StatusMailboxesSize = 0;
+	} else {
+		CommandMailboxesSize = DAC960_V1_CommandMailboxCount * sizeof(DAC960_V1_CommandMailbox_T);
+		StatusMailboxesSize = DAC960_V1_StatusMailboxCount * sizeof(DAC960_V1_StatusMailbox_T);
+	}
+	DmaPagesSize = CommandMailboxesSize + StatusMailboxesSize +
+		sizeof(DAC960_V1_DCDB_T) + sizeof(DAC960_V1_Enquiry_T) +
+		sizeof(DAC960_V1_ErrorTable_T) + sizeof(DAC960_V1_EventLogEntry_T) +
+		sizeof(DAC960_V1_RebuildProgress_T) +
+		sizeof(DAC960_V1_LogicalDeviceInfoArray_T) +
+		sizeof(DAC960_V1_BackgroundInitializationStatus_T) +
+		sizeof(DAC960_V1_DeviceState_T);
+
+	if (!init_dma_loaf(pdev, DmaPages, DmaPagesSize))
+		return false;
+
+
+	if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
+		goto skip_mailboxes;
+
+	CommandMailboxesMemory = slice_dma_loaf(DmaPages,
+		CommandMailboxesSize, &CommandMailboxesMemoryDMA);
+
+	/* These are the base addresses for the command memory mailbox array */
+	c->V1.FirstCommandMailbox = CommandMailboxesMemory;
+	c->V1.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
+
+	CommandMailboxesMemory += DAC960_V1_CommandMailboxCount - 1;
+	c->V1.LastCommandMailbox = CommandMailboxesMemory;
+	c->V1.NextCommandMailbox = c->V1.FirstCommandMailbox;
+	c->V1.PreviousCommandMailbox1 = c->V1.LastCommandMailbox;
+	c->V1.PreviousCommandMailbox2 = c->V1.LastCommandMailbox - 1;
+
+	/* These are the base addresses for the status memory mailbox array */
+	StatusMailboxesMemory = slice_dma_loaf(DmaPages,
+		StatusMailboxesSize, &StatusMailboxesMemoryDMA);
+
+	c->V1.FirstStatusMailbox = StatusMailboxesMemory;
+	c->V1.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
+	StatusMailboxesMemory += DAC960_V1_StatusMailboxCount - 1;
+	c->V1.LastStatusMailbox = StatusMailboxesMemory;
+	c->V1.NextStatusMailbox = c->V1.FirstStatusMailbox;
+
+skip_mailboxes:
+	/* Carve the remaining per-controller DMA buffers out of the loaf */
+	c->V1.NewEnquiry = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V1_Enquiry_T),
+		&c->V1.NewEnquiryDMA);
+
+	c->V1.NewErrorTable = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V1_ErrorTable_T),
+		&c->V1.NewErrorTableDMA);
+
+	c->V1.EventLogEntry = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V1_EventLogEntry_T),
+		&c->V1.EventLogEntryDMA);
+
+	c->V1.RebuildProgress = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V1_RebuildProgress_T),
+		&c->V1.RebuildProgressDMA);
+
+	c->V1.LogicalDeviceInfo = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V1_LogicalDeviceInfoArray_T),
+		&c->V1.LogicalDeviceInfoDMA);
+
+	c->V1.BackgroundInitializationStatus = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V1_BackgroundInitializationStatus_T),
+		&c->V1.BackgroundInitializationStatusDMA);
+
+	c->V1.NewDeviceState = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V1_DeviceState_T),
+		&c->V1.NewDeviceStateDMA);
+
+	if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
+		return true;
+
+	/* Enable the Memory Mailbox Interface. */
+	c->V1.DualModeMemoryMailboxInterface = true;
+	mbox.TypeX.opcode = 0x2B;
+	mbox.TypeX.id = 0;
+	mbox.TypeX.CommandOpcode2 = 0x14;
+	mbox.TypeX.CommandMailboxesBusAddress = c->V1.FirstCommandMailboxDMA;
+	mbox.TypeX.StatusMailboxesBusAddress = c->V1.FirstStatusMailboxDMA;
+
+	/* Try dual mode first; on failure retry once in single mode
+	   (CommandOpcode2 0x10) */
+	for (i = 0; i < 2; i++)
+		switch (c->HardwareType) {
+		case DAC960_LA_Controller:
+			timeout = 0;
+			while (timeout < DAC960_MAILBOX_TIMEOUT) {
+				if (!DAC960_LA_HardwareMailboxFullP(base))
+					break;
+				udelay(10);
+				timeout++;
+			}
+			if (DAC960_LA_HardwareMailboxFullP(base)) {
+				dev_err(&pdev->dev,
+					"Timeout waiting for empty mailbox\n");
+				return false;
+			}
+			DAC960_LA_WriteHardwareMailbox(base, &mbox);
+			DAC960_LA_HardwareMailboxNewCommand(base);
+			timeout = 0;
+			while (timeout < DAC960_MAILBOX_TIMEOUT) {
+				if (DAC960_LA_HardwareMailboxStatusAvailableP(
+					    base))
+					break;
+				udelay(10);
+				timeout++;
+			}
+			if (!DAC960_LA_HardwareMailboxStatusAvailableP(base)) {
+				dev_err(&pdev->dev,
+					"Timeout waiting for mailbox status\n");
+				return false;
+			}
+			status = DAC960_LA_ReadStatusRegister(base);
+			DAC960_LA_AcknowledgeHardwareMailboxInterrupt(base);
+			DAC960_LA_AcknowledgeHardwareMailboxStatus(base);
+			if (status == DAC960_V1_NormalCompletion)
+				return true;
+			c->V1.DualModeMemoryMailboxInterface = false;
+			mbox.TypeX.CommandOpcode2 = 0x10;
+			break;
+		case DAC960_PG_Controller:
+			timeout = 0;
+			while (timeout < DAC960_MAILBOX_TIMEOUT) {
+				if (!DAC960_PG_HardwareMailboxFullP(base))
+					break;
+				udelay(10);
+				timeout++;
+			}
+			if (DAC960_PG_HardwareMailboxFullP(base)) {
+				dev_err(&pdev->dev,
+					"Timeout waiting for empty mailbox\n");
+				return false;
+			}
+			DAC960_PG_WriteHardwareMailbox(base, &mbox);
+			DAC960_PG_HardwareMailboxNewCommand(base);
+
+			timeout = 0;
+			while (timeout < DAC960_MAILBOX_TIMEOUT) {
+				if (DAC960_PG_HardwareMailboxStatusAvailableP(
+					    base))
+					break;
+				udelay(10);
+				timeout++;
+			}
+			if (!DAC960_PG_HardwareMailboxStatusAvailableP(base)) {
+				dev_err(&pdev->dev,
+					"Timeout waiting for mailbox status\n");
+				return false;
+			}
+			status = DAC960_PG_ReadStatusRegister(base);
+			DAC960_PG_AcknowledgeHardwareMailboxInterrupt(base);
+			DAC960_PG_AcknowledgeHardwareMailboxStatus(base);
+			if (status == DAC960_V1_NormalCompletion)
+				return true;
+			c->V1.DualModeMemoryMailboxInterface = false;
+			mbox.TypeX.CommandOpcode2 = 0x10;
+			break;
+		default:
+			dev_err(&pdev->dev,
+				"Unknown Controller Type %X\n",
+				c->HardwareType);
+			/* Fix: dropped the unreachable break after return */
+			return false;
+		}
+	/* Fix: error message typo "statux" -> "status" */
+	dev_err(&pdev->dev,
+		"Failed to enable mailbox, status %02X\n",
+		status);
+	return false;
+}
+
+
+/*
+ DAC960_V2_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
+ for DAC960 V2 Firmware Controllers.
+
+ Aggregate the space needed for the controller's memory mailbox and
+ the other data structures that will be targets of dma transfers with
+ the controller. Allocate a dma-mapped region of memory to hold these
+ structures. Then, save CPU pointers and dma_addr_t values to reference
+ the structures that are contained in that region.
+*/
+
+static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T *c)
+{
+	void __iomem *base = c->BaseAddress;
+	struct pci_dev *pdev = c->PCIDevice;
+	struct dma_loaf *DmaPages = &c->DmaPages;
+	size_t DmaPagesSize;
+	size_t CommandMailboxesSize;
+	size_t StatusMailboxesSize;
+
+	DAC960_V2_CommandMailbox_T *CommandMailboxesMemory;
+	dma_addr_t CommandMailboxesMemoryDMA;
+
+	DAC960_V2_StatusMailbox_T *StatusMailboxesMemory;
+	dma_addr_t StatusMailboxesMemoryDMA;
+
+	DAC960_V2_CommandMailbox_T *mbox;
+	dma_addr_t CommandMailboxDMA;
+	unsigned char status;
+
+	/* Prefer 64-bit DMA, fall back to 32-bit */
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+		c->BounceBufferLimit = DMA_BIT_MASK(64);
+	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+		c->BounceBufferLimit = DMA_BIT_MASK(32);
+	else {
+		dev_err(&pdev->dev, "DMA mask out of range\n");
+		return false;
+	}
+
+	/* This is a temporary dma mapping, used only in the scope of this function */
+	mbox = pci_alloc_consistent(pdev,
+				    sizeof(DAC960_V2_CommandMailbox_T),
+				    &CommandMailboxDMA);
+	if (mbox == NULL)
+		return false;
+
+	CommandMailboxesSize = DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T);
+	StatusMailboxesSize = DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T);
+	DmaPagesSize =
+		CommandMailboxesSize + StatusMailboxesSize +
+		sizeof(DAC960_V2_HealthStatusBuffer_T) +
+		sizeof(DAC960_V2_ControllerInfo_T) +
+		sizeof(DAC960_V2_LogicalDeviceInfo_T) +
+		sizeof(DAC960_V2_PhysicalDeviceInfo_T) +
+		sizeof(DAC960_V2_Event_T) +
+		sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
+
+	if (!init_dma_loaf(pdev, DmaPages, DmaPagesSize)) {
+		pci_free_consistent(pdev, sizeof(DAC960_V2_CommandMailbox_T),
+				    mbox, CommandMailboxDMA);
+		return false;
+	}
+
+	CommandMailboxesMemory = slice_dma_loaf(DmaPages,
+		CommandMailboxesSize, &CommandMailboxesMemoryDMA);
+
+	/* These are the base addresses for the command memory mailbox array */
+	c->V2.FirstCommandMailbox = CommandMailboxesMemory;
+	c->V2.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
+
+	CommandMailboxesMemory += DAC960_V2_CommandMailboxCount - 1;
+	c->V2.LastCommandMailbox = CommandMailboxesMemory;
+	c->V2.NextCommandMailbox = c->V2.FirstCommandMailbox;
+	c->V2.PreviousCommandMailbox1 = c->V2.LastCommandMailbox;
+	c->V2.PreviousCommandMailbox2 = c->V2.LastCommandMailbox - 1;
+
+	/* These are the base addresses for the status memory mailbox array */
+	StatusMailboxesMemory = slice_dma_loaf(DmaPages,
+		StatusMailboxesSize, &StatusMailboxesMemoryDMA);
+
+	c->V2.FirstStatusMailbox = StatusMailboxesMemory;
+	c->V2.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
+	StatusMailboxesMemory += DAC960_V2_StatusMailboxCount - 1;
+	c->V2.LastStatusMailbox = StatusMailboxesMemory;
+	c->V2.NextStatusMailbox = c->V2.FirstStatusMailbox;
+
+	c->V2.HealthStatusBuffer = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V2_HealthStatusBuffer_T),
+		&c->V2.HealthStatusBufferDMA);
+
+	c->V2.NewControllerInformation = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V2_ControllerInfo_T),
+		&c->V2.NewControllerInformationDMA);
+
+	c->V2.NewLogicalDeviceInformation = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V2_LogicalDeviceInfo_T),
+		&c->V2.NewLogicalDeviceInformationDMA);
+
+	c->V2.NewPhysicalDeviceInformation = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V2_PhysicalDeviceInfo_T),
+		&c->V2.NewPhysicalDeviceInformationDMA);
+
+	c->V2.Event = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V2_Event_T),
+		&c->V2.EventDMA);
+
+	c->V2.PhysicalToLogicalDevice = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V2_PhysicalToLogicalDevice_T),
+		&c->V2.PhysicalToLogicalDeviceDMA);
+
+	/*
+	  Enable the Memory Mailbox Interface.
+
+	  I don't know why we can't just use one of the memory mailboxes
+	  we just allocated to do this, instead of using this temporary one.
+	  Try this change later.
+	*/
+	memset(mbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
+	mbox->SetMemoryMailbox.id = 1;
+	mbox->SetMemoryMailbox.opcode = DAC960_V2_IOCTL;
+	mbox->SetMemoryMailbox.control.NoAutoRequestSense = true;
+	mbox->SetMemoryMailbox.FirstCommandMailboxSizeKB =
+		(DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T)) >> 10;
+	mbox->SetMemoryMailbox.FirstStatusMailboxSizeKB =
+		(DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T)) >> 10;
+	mbox->SetMemoryMailbox.SecondCommandMailboxSizeKB = 0;
+	mbox->SetMemoryMailbox.SecondStatusMailboxSizeKB = 0;
+	mbox->SetMemoryMailbox.sense_len = 0;
+	mbox->SetMemoryMailbox.IOCTL_Opcode = DAC960_V2_SetMemoryMailbox;
+	mbox->SetMemoryMailbox.HealthStatusBufferSizeKB = 1;
+	mbox->SetMemoryMailbox.HealthStatusBufferBusAddress =
+		c->V2.HealthStatusBufferDMA;
+	mbox->SetMemoryMailbox.FirstCommandMailboxBusAddress =
+		c->V2.FirstCommandMailboxDMA;
+	mbox->SetMemoryMailbox.FirstStatusMailboxBusAddress =
+		c->V2.FirstStatusMailboxDMA;
+	/* NOTE(review): unlike the V1 variant these wait loops have no
+	   timeout — a hung controller will spin here forever; consider
+	   bounding them like DAC960_V1_EnableMemoryMailboxInterface */
+	switch (c->HardwareType) {
+	case DAC960_GEM_Controller:
+		while (DAC960_GEM_HardwareMailboxFullP(base))
+			udelay(1);
+		DAC960_GEM_WriteHardwareMailbox(base, CommandMailboxDMA);
+		DAC960_GEM_HardwareMailboxNewCommand(base);
+		while (!DAC960_GEM_HardwareMailboxStatusAvailableP(base))
+			udelay(1);
+		status = DAC960_GEM_ReadCommandStatus(base);
+		DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(base);
+		DAC960_GEM_AcknowledgeHardwareMailboxStatus(base);
+		break;
+	case DAC960_BA_Controller:
+		while (DAC960_BA_HardwareMailboxFullP(base))
+			udelay(1);
+		DAC960_BA_WriteHardwareMailbox(base, CommandMailboxDMA);
+		DAC960_BA_HardwareMailboxNewCommand(base);
+		while (!DAC960_BA_HardwareMailboxStatusAvailableP(base))
+			udelay(1);
+		status = DAC960_BA_ReadCommandStatus(base);
+		DAC960_BA_AcknowledgeHardwareMailboxInterrupt(base);
+		DAC960_BA_AcknowledgeHardwareMailboxStatus(base);
+		break;
+	case DAC960_LP_Controller:
+		while (DAC960_LP_HardwareMailboxFullP(base))
+			udelay(1);
+		DAC960_LP_WriteHardwareMailbox(base, CommandMailboxDMA);
+		DAC960_LP_HardwareMailboxNewCommand(base);
+		while (!DAC960_LP_HardwareMailboxStatusAvailableP(base))
+			udelay(1);
+		status = DAC960_LP_ReadCommandStatus(base);
+		DAC960_LP_AcknowledgeHardwareMailboxInterrupt(base);
+		DAC960_LP_AcknowledgeHardwareMailboxStatus(base);
+		break;
+	default:
+		dev_err(&pdev->dev, "Unknown Controller Type %X\n",
+			c->HardwareType);
+		/* Fix: free the temporary mailbox on this error path too;
+		   the original leaked the coherent allocation here */
+		pci_free_consistent(pdev, sizeof(DAC960_V2_CommandMailbox_T),
+				    mbox, CommandMailboxDMA);
+		return false;
+	}
+	pci_free_consistent(pdev, sizeof(DAC960_V2_CommandMailbox_T),
+			    mbox, CommandMailboxDMA);
+	if (status != DAC960_V2_NormalCompletion)
+		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
+			status);
+	return (status == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+ DAC960_V1_ReadControllerConfiguration reads the Configuration Information
+ from DAC960 V1 Firmware Controllers and initializes the Controller structure.
+*/
+
+static int DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T *c)
+{
+ DAC960_V1_Enquiry2_T *Enquiry2;
+ dma_addr_t Enquiry2DMA;
+ DAC960_V1_Config2_T *Config2;
+ dma_addr_t Config2DMA;
+ struct Scsi_Host *shost = c->host;
+ struct pci_dev *pdev = c->PCIDevice;
+ unsigned short status;
+ int ret = -ENODEV;
+
+ Enquiry2 = pci_zalloc_consistent(pdev, sizeof(DAC960_V1_Enquiry2_T),
+ &Enquiry2DMA);
+ if (!Enquiry2) {
+ shost_printk(KERN_ERR, c->host,
+ "Failed to allocated V1 Enquiry2 memory\n");
+ return -ENOMEM;
+ }
+ Config2 = pci_zalloc_consistent(pdev, sizeof(DAC960_V1_Config2_T),
+ &Config2DMA);
+ if (!Config2) {
+ shost_printk(KERN_ERR, c->host,
+ "Failed to allocate V1 Config2 memory\n");
+ pci_free_consistent(pdev, sizeof(DAC960_V1_Enquiry2_T),
+ Enquiry2, Enquiry2DMA);
+ return -ENOMEM;
+ }
+ mutex_lock(&c->V1.dma_mutex);
+ status = DAC960_V1_NewEnquiry(c);
+ mutex_unlock(&c->V1.dma_mutex);
+ if (status != DAC960_V1_NormalCompletion) {
+ shost_printk(KERN_WARNING, c->host,
+ "Failed it issue V1 Enquiry\n");
+ goto out;
+ }
+
+ status = DAC960_V1_ExecuteType3(c, DAC960_V1_Enquiry2, Enquiry2DMA);
+ if (status != DAC960_V1_NormalCompletion) {
+ shost_printk(KERN_WARNING, c->host,
+ "Failed to issue V1 Enquiry2\n");
+ goto out;
+ }
+
+ status = DAC960_V1_ExecuteType3(c, DAC960_V1_ReadConfig2, Config2DMA);
+ if (status != DAC960_V1_NormalCompletion) {
+ shost_printk(KERN_WARNING, c->host,
+ "Failed to issue ReadConfig2\n");
+ goto out;
+ }
+
+ status = DAC960_V1_GetLogicalDriveInfo(c);
+ if (status != DAC960_V1_NormalCompletion) {
+ shost_printk(KERN_WARNING, c->host,
+ "Failed to get logical drive information\n");
+ goto out;
+ }
+
+ /*
+ Initialize the Controller Model Name and Full Model Name fields.
+ */
+ switch (Enquiry2->HardwareID.SubModel) {
+ case DAC960_V1_P_PD_PU:
+ if (Enquiry2->SCSICapability.BusSpeed == DAC960_V1_Ultra)
+ strcpy(c->ModelName, "DAC960PU");
+ else
+ strcpy(c->ModelName, "DAC960PD");
+ break;
+ case DAC960_V1_PL:
+ strcpy(c->ModelName, "DAC960PL");
+ break;
+ case DAC960_V1_PG:
+ strcpy(c->ModelName, "DAC960PG");
+ break;
+ case DAC960_V1_PJ:
+ strcpy(c->ModelName, "DAC960PJ");
+ break;
+ case DAC960_V1_PR:
+ strcpy(c->ModelName, "DAC960PR");
+ break;
+ case DAC960_V1_PT:
+ strcpy(c->ModelName, "DAC960PT");
+ break;
+ case DAC960_V1_PTL0:
+ strcpy(c->ModelName, "DAC960PTL0");
+ break;
+ case DAC960_V1_PRL:
+ strcpy(c->ModelName, "DAC960PRL");
+ break;
+ case DAC960_V1_PTL1:
+ strcpy(c->ModelName, "DAC960PTL1");
+ break;
+ case DAC960_V1_1164P:
+ strcpy(c->ModelName, "eXtremeRAID 1100");
+ break;
+ default:
+ shost_printk(KERN_WARNING, c->host,
+ "Unknown Model %X\n",
+ Enquiry2->HardwareID.SubModel);
+ goto out;
+ }
+ strcpy(c->FullModelName, DAC960_DriverName);
+ strcat(c->FullModelName, " ");
+ strcat(c->FullModelName, c->ModelName);
+ /*
+ Initialize the Controller Firmware Version field and verify that it
+ is a supported firmware version. The supported firmware versions are:
+
+ DAC1164P 5.06 and above
+ DAC960PTL/PRL/PJ/PG 4.06 and above
+ DAC960PU/PD/PL 3.51 and above
+ DAC960PU/PD/PL/P 2.73 and above
+ */
+#if defined(CONFIG_ALPHA)
+ /*
+ DEC Alpha machines were often equipped with DAC960 cards that were
+ OEMed from Mylex, and had their own custom firmware. Version 2.70,
+ the last custom FW revision to be released by DEC for these older
+ controllers, appears to work quite well with this driver.
+
+ Cards tested successfully were several versions each of the PD and
+ PU, called by DEC the KZPSC and KZPAC, respectively, and having
+ the Manufacturer Numbers (from Mylex), usually on a sticker on the
+ back of the board, of:
+
+ KZPSC: D040347 (1-channel) or D040348 (2-channel) or D040349 (3-channel)
+ KZPAC: D040395 (1-channel) or D040396 (2-channel) or D040397 (3-channel)
+ */
+# define FIRMWARE_27X "2.70"
+#else
+# define FIRMWARE_27X "2.73"
+#endif
+
+ if (Enquiry2->FirmwareID.MajorVersion == 0) {
+ Enquiry2->FirmwareID.MajorVersion =
+ c->V1.Enquiry.MajorFirmwareVersion;
+ Enquiry2->FirmwareID.MinorVersion =
+ c->V1.Enquiry.MinorFirmwareVersion;
+ Enquiry2->FirmwareID.FirmwareType = '0';
+ Enquiry2->FirmwareID.TurnID = 0;
+ }
+ sprintf(c->FirmwareVersion, "%d.%02d-%c-%02d",
+ Enquiry2->FirmwareID.MajorVersion,
+ Enquiry2->FirmwareID.MinorVersion,
+ Enquiry2->FirmwareID.FirmwareType,
+ Enquiry2->FirmwareID.TurnID);
+ if (!((c->FirmwareVersion[0] == '5' &&
+ strcmp(c->FirmwareVersion, "5.06") >= 0) ||
+ (c->FirmwareVersion[0] == '4' &&
+ strcmp(c->FirmwareVersion, "4.06") >= 0) ||
+ (c->FirmwareVersion[0] == '3' &&
+ strcmp(c->FirmwareVersion, "3.51") >= 0) ||
+ (c->FirmwareVersion[0] == '2' &&
+ strcmp(c->FirmwareVersion, FIRMWARE_27X) >= 0))) {
+ shost_printk(KERN_WARNING, c->host,
+ "Firmware Version '%s' unsupported\n",
+ c->FirmwareVersion);
+ goto out;
+ }
+ /*
+ Initialize the c Channels, Targets, Memory Size, and SAF-TE
+ Enclosure Management Enabled fields.
+ */
+ switch (Enquiry2->HardwareID.Model) {
+ case DAC960_V1_FiveChannelBoard:
+ c->PhysicalChannelMax = 5;
+ break;
+ case DAC960_V1_ThreeChannelBoard:
+ case DAC960_V1_ThreeChannelASIC_DAC:
+ c->PhysicalChannelMax = 3;
+ break;
+ case DAC960_V1_TwoChannelBoard:
+ c->PhysicalChannelMax = 2;
+ break;
+ default:
+ c->PhysicalChannelMax = Enquiry2->ActualChannels;
+ break;
+ }
+ c->PhysicalChannelCount = Enquiry2->ActualChannels;
+ c->LogicalChannelCount = 1;
+ c->LogicalChannelMax = 1;
+ if (Enquiry2->SCSICapability.BusWidth == DAC960_V1_Wide_32bit)
+ c->V1.BusWidth = 32;
+ else if (Enquiry2->SCSICapability.BusWidth == DAC960_V1_Wide_16bit)
+ c->V1.BusWidth = 16;
+ else
+ c->V1.BusWidth = 8;
+ c->V1.LogicalBlockSize = Enquiry2->LogicalDriveBlockSize;
+ shost->max_channel = c->PhysicalChannelCount + c->LogicalChannelCount;
+ shost->max_id = Enquiry2->MaxTargets;
+ if (Enquiry2->MaxLogicalDrives > shost->max_id) {
+ int channels;
+
+ channels = Enquiry2->MaxLogicalDrives / shost->max_id;
+ c->LogicalChannelCount = c->LogicalChannelMax = channels;
+ }
+ c->MemorySize = Enquiry2->MemorySize >> 20;
+ c->V1.SAFTE_EnclosureManagementEnabled =
+ (Enquiry2->FaultManagementType == DAC960_V1_SAFTE);
+ /*
+ Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
+ Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
+ Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one
+ less than the Controller Queue Depth to allow for an automatic drive
+ rebuild operation.
+ */
+ c->ControllerQueueDepth = c->V1.Enquiry.MaxCommands;
+ if (c->ControllerQueueDepth < 3)
+ c->ControllerQueueDepth = Enquiry2->MaxCommands;
+ if (c->ControllerQueueDepth < 3)
+ /* Play safe and disable TCQ */
+ c->ControllerQueueDepth = 3;
+ shost->can_queue = c->ControllerQueueDepth - 2;
+ if (shost->can_queue > DAC960_MaxDriverQueueDepth)
+ shost->can_queue = DAC960_MaxDriverQueueDepth;
+ c->LogicalDriveCount = c->V1.Enquiry.NumberOfLogicalDrives;
+ shost->max_sectors = Enquiry2->MaxBlocksPerCommand;
+ c->ControllerScatterGatherLimit = Enquiry2->MaxScatterGatherEntries;
+ shost->sg_tablesize = c->ControllerScatterGatherLimit;
+ if (shost->sg_tablesize > DAC960_V1_ScatterGatherLimit)
+ shost->sg_tablesize = DAC960_V1_ScatterGatherLimit;
+ /*
+ Initialize the Stripe Size, Segment Size, and Geometry Translation.
+ */
+ c->V1.StripeSize = Config2->BlocksPerStripe * Config2->BlockFactor
+ >> (10 - DAC960_BlockSizeBits);
+ c->V1.SegmentSize = Config2->BlocksPerCacheLine * Config2->BlockFactor
+ >> (10 - DAC960_BlockSizeBits);
+ switch (Config2->DriveGeometry) {
+ case DAC960_V1_Geometry_128_32:
+ c->V1.GeometryTranslationHeads = 128;
+ c->V1.GeometryTranslationSectors = 32;
+ break;
+ case DAC960_V1_Geometry_255_63:
+ c->V1.GeometryTranslationHeads = 255;
+ c->V1.GeometryTranslationSectors = 63;
+ break;
+ default:
+ shost_printk(KERN_WARNING, c->host,
+ "Invalid config2 drive geometry %x\n",
+ Config2->DriveGeometry);
+ goto out;
+ }
+ /*
+ Initialize the Background Initialization Status.
+ */
+ if ((c->FirmwareVersion[0] == '4' &&
+ strcmp(c->FirmwareVersion, "4.08") >= 0) ||
+ (c->FirmwareVersion[0] == '5' &&
+ strcmp(c->FirmwareVersion, "5.08") >= 0)) {
+ c->V1.BackgroundInitializationStatusSupported = true;
+ DAC960_V1_ExecuteType3B(c,
+ DAC960_V1_BackgroundInitializationControl, 0x20,
+ c->
+ V1.BackgroundInitializationStatusDMA);
+ memcpy(&c->V1.LastBackgroundInitializationStatus,
+ c->V1.BackgroundInitializationStatus,
+ sizeof(DAC960_V1_BackgroundInitializationStatus_T));
+ }
+ c->V1.LastRebuildStatus = DAC960_V1_NoRebuildOrCheckInProgress;
+ ret = 0;
+
+out:
+ pci_free_consistent(pdev, sizeof(DAC960_V1_Enquiry2_T),
+ Enquiry2, Enquiry2DMA);
+ pci_free_consistent(pdev, sizeof(DAC960_V1_Config2_T),
+ Config2, Config2DMA);
+ return ret;
+}
+
+
+/*
+ DAC960_V2_ReadControllerConfiguration reads the Configuration Information
+ from DAC960 V2 Firmware Controllers and initializes the Controller structure.
+*/
+
static int DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T *c)
{
	DAC960_V2_ControllerInfo_T *info = &c->V2.ControllerInformation;
	struct Scsi_Host *shost = c->host;
	unsigned char status;
	int i, ModelNameLength;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&c->V2.cinfo_mutex);
	status = DAC960_V2_NewControllerInfo(c);
	mutex_unlock(&c->V2.cinfo_mutex);
	if (status != DAC960_V2_NormalCompletion) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/*
	  Initialize the Controller Model Name and Full Model Name fields.
	*/
	/* Clamp the copy so a NUL terminator always fits in ModelName. */
	ModelNameLength = sizeof(info->ControllerName);
	if (ModelNameLength > sizeof(c->ModelName)-1)
		ModelNameLength = sizeof(c->ModelName)-1;
	memcpy(c->ModelName, info->ControllerName,
	       ModelNameLength);
	/*
	 * Strip trailing blanks/NULs from the firmware-supplied name.
	 * NOTE(review): assumes the name contains at least one non-blank
	 * character; an all-blank name would walk the index below zero —
	 * TODO confirm the firmware guarantees a non-empty name.
	 */
	ModelNameLength--;
	while (c->ModelName[ModelNameLength] == ' ' ||
	       c->ModelName[ModelNameLength] == '\0')
		ModelNameLength--;
	c->ModelName[++ModelNameLength] = '\0';
	strcpy(c->FullModelName, DAC960_DriverName);
	strcat(c->FullModelName, " ");
	strcat(c->FullModelName, c->ModelName);
	/*
	  Initialize the Controller Firmware Version field.
	*/
	sprintf(c->FirmwareVersion, "%d.%02d-%02d",
		info->FirmwareMajorVersion,
		info->FirmwareMinorVersion,
		info->FirmwareTurnNumber);
	/* Firmware older than 6.00-01 lacks the status monitoring we need. */
	if (info->FirmwareMajorVersion == 6 &&
	    info->FirmwareMinorVersion == 0 &&
	    info->FirmwareTurnNumber < 1) {
		shost_printk(KERN_WARNING, shost,
			"FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			"STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			"PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			c->FirmwareVersion);
	}
	/*
	  Initialize the Controller Channels, Targets, and Memory Size.
	*/
	c->PhysicalChannelMax = info->NumberOfPhysicalChannelsPossible;
	c->PhysicalChannelCount = info->NumberOfPhysicalChannelsPresent;
	c->LogicalChannelMax = info->NumberOfVirtualChannelsPossible;
	c->LogicalChannelCount = info->NumberOfVirtualChannelsPresent;
	shost->max_channel = c->PhysicalChannelCount + c->LogicalChannelCount;
	/* max_id is the largest per-channel target count over all channels. */
	shost->max_id = info->MaximumTargetsPerChannel[0];
	for (i = 1; i < 16; i++) {
		if (!info->MaximumTargetsPerChannel[i])
			continue;
		if (shost->max_id < info->MaximumTargetsPerChannel[i])
			shost->max_id = info->MaximumTargetsPerChannel[i];
	}
	c->MemorySize = info->MemorySizeMB;
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	c->ControllerQueueDepth = info->MaximumParallelCommands;
	shost->can_queue = c->ControllerQueueDepth - 3;
	if (shost->can_queue > DAC960_MaxDriverQueueDepth)
		shost->can_queue = DAC960_MaxDriverQueueDepth;
	c->LogicalDriveCount = info->LogicalDevicesPresent;
	shost->max_sectors =
		info->MaximumDataTransferSizeInBlocks;
	/* Cap the driver's S/G table at the controller's hardware limit. */
	c->ControllerScatterGatherLimit =
		info->MaximumScatterGatherEntries;
	shost->sg_tablesize = c->ControllerScatterGatherLimit;
	if (shost->sg_tablesize > DAC960_V2_ScatterGatherLimit)
		shost->sg_tablesize = DAC960_V2_ScatterGatherLimit;
	return 0;
}
+
+
+/*
+ DAC960_ReportControllerConfiguration reports the Configuration Information
+ for Controller.
+*/
+
static void DAC960_ReportControllerConfiguration(DAC960_Controller_T *c)
{
	/* Log the configuration common to V1 and V2 controllers. */
	shost_printk(KERN_INFO, c->host,
		"Configuring %s PCI RAID Controller\n", c->ModelName);
	shost_printk(KERN_INFO, c->host,
		"  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		c->FirmwareVersion, c->PhysicalChannelCount, c->MemorySize);
	/* IO_Address == 0 means the controller is memory-mapped only. */
	if (c->IO_Address == 0)
		shost_printk(KERN_INFO, c->host,
			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)c->PCI_Address, c->IRQ_Channel);
	else
		shost_printk(KERN_INFO, c->host,
			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)c->IO_Address,
			(unsigned long)c->PCI_Address,
			c->IRQ_Channel);
	shost_printk(KERN_INFO, c->host,
		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		c->ControllerQueueDepth, c->host->max_sectors);
	shost_printk(KERN_INFO, c->host,
		"  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		c->host->can_queue, c->host->sg_tablesize,
		c->ControllerScatterGatherLimit);
	/* V1 firmware reports geometry/stripe data; V2 reports device counts. */
	if (c->FirmwareType == DAC960_V1_Controller) {
		shost_printk(KERN_INFO, c->host,
			"  Stripe Size: %dKB, Segment Size: %dKB, "
			"BIOS Geometry: %d/%d%s\n",
			c->V1.StripeSize,
			c->V1.SegmentSize,
			c->V1.GeometryTranslationHeads,
			c->V1.GeometryTranslationSectors,
			c->V1.SAFTE_EnclosureManagementEnabled ?
			"  SAF-TE Enclosure Management Enabled" : "");
		shost_printk(KERN_INFO, c->host,
			"  Physical: %d/%d channels\n",
			c->PhysicalChannelCount, c->PhysicalChannelMax);
	} else {
		int i;
		DAC960_V2_ControllerInfo_T *info;

		info = &c->V2.ControllerInformation;
		/* Per-channel maximum device counts (0 == channel unused). */
		for (i = 0; i < c->PhysicalChannelMax; i++) {
			if (!info->MaximumTargetsPerChannel[i])
				continue;
			shost_printk(KERN_INFO, c->host,
				"  Device Channel %d: max %d devices\n",
				i, info->MaximumTargetsPerChannel[i]);
		}
		shost_printk(KERN_INFO, c->host,
			"  Physical: %d/%d channels, %d disks, %d devices\n",
			c->PhysicalChannelCount, c->PhysicalChannelMax,
			info->PhysicalDisksPresent,
			info->PhysicalDevicesPresent);
	}
	shost_printk(KERN_INFO, c->host,
		"  Logical: %d/%d channels, %d disks\n",
		c->LogicalChannelCount, c->LogicalChannelMax,
		c->LogicalDriveCount);
}
+
+/*
+ DAC960_ReportErrorStatus reports Controller BIOS Messages passed through
+ the Error Status Register when the driver performs the BIOS handshaking.
+ It returns true for fatal errors and false otherwise.
+*/
+
+static bool DAC960_ReportErrorStatus(DAC960_Controller_T *c,
+ unsigned char ErrorStatus,
+ unsigned char Parameter0,
+ unsigned char Parameter1)
+{
+ struct pci_dev *pdev = c->PCIDevice;
+
+ switch (ErrorStatus) {
+ case 0x00:
+ dev_info(&pdev->dev,
+ "Physical Device %d:%d Not Responding\n",
+ Parameter1, Parameter0);
+ break;
+ case 0x08:
+ if (c->DriveSpinUpMessageDisplayed)
+ break;
+ dev_notice(&pdev->dev, "Spinning Up Drives\n");
+ c->DriveSpinUpMessageDisplayed = true;
+ break;
+ case 0x30:
+ dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+ break;
+ case 0x60:
+ dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+ break;
+ case 0x70:
+ dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+ break;
+ case 0x90:
+ dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+ Parameter1, Parameter0);
+ break;
+ case 0xA0:
+ dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+ break;
+ case 0xB0:
+ dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+ break;
+ case 0xD0:
+ dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+ break;
+ case 0xF0:
+ dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+ return true;
+ default:
+ dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+ ErrorStatus);
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ * DAC960_DetectCleanup releases the resources that were allocated
+ * during DAC960_DetectController(). DAC960_DetectController has
+ * several internal failure points, so not ALL resources may
+ * have been allocated. It's important to free only
+ * resources that HAVE been allocated. The code below always
+ * tests that the resource has been allocated before attempting to
+ * free it.
+ */
static void DAC960_DetectCleanup(DAC960_Controller_T *c)
{
	struct pci_dev *pdev = c->PCIDevice;

	/* Free the memory mailbox, status, and related structures */
	free_dma_loaf(pdev, &c->DmaPages);
	/* Quiesce the hardware before unmapping its registers. */
	if (c->MemoryMappedAddress) {
		DAC960_DisableInterrupts(c);
		iounmap(c->MemoryMappedAddress);
	}
	if (c->IRQ_Channel)
		free_irq(c->IRQ_Channel, c);
	if (c->IO_Address)
		release_region(c->IO_Address, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	/*
	 * NOTE(review): free_dma_loaf(), destroy_workqueue() and
	 * scsi_host_put() run unconditionally, so they presumably tolerate
	 * (or are guaranteed to follow) successful allocation of DmaPages,
	 * work_q and host — confirm against DAC960_DetectController()'s
	 * failure paths.
	 */
	destroy_workqueue(c->work_q);
	scsi_host_put(c->host);
}
+
+int DAC960_host_reset(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ DAC960_Controller_T *c =
+ (DAC960_Controller_T *)shost->hostdata;
+
+ c->Reset(c->BaseAddress);
+ return SUCCESS;
+}
+
+static int mylex_v1_pthru_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+ DAC960_V1_CommandBlock_T *cmd_blk = scsi_cmd_priv(scmd);
+ DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+ DAC960_V1_DCDB_T *DCDB;
+ dma_addr_t DCDB_dma;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ int nsge;
+
+ DAC960_V1_ClearCommand(cmd_blk);
+ DCDB = pci_pool_alloc(c->V1.DCDBPool, GFP_ATOMIC, &DCDB_dma);
+ if (!DCDB)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ nsge = scsi_dma_map(scmd);
+ if (nsge > 1) {
+ pci_pool_free(c->V1.DCDBPool, DCDB, DCDB_dma);
+ cmd_blk->DCDB = NULL;
+ scmd->result = (DID_ERROR << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ cmd_blk->DCDB = DCDB;
+ cmd_blk->DCDB_dma = DCDB_dma;
+ mbox->Type3.opcode = DAC960_V1_DCDB;
+ mbox->Type3.id = scmd->request->tag + 3;
+ mbox->Type3.BusAddress = DCDB_dma;
+ DCDB->Channel = sdev->channel;
+ DCDB->TargetID = sdev->id;
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ DCDB->Direction = DAC960_V1_DCDB_NoDataTransfer;
+ break;
+ case DMA_TO_DEVICE:
+ DCDB->Direction = DAC960_V1_DCDB_DataTransferSystemToDevice;
+ break;
+ case DMA_FROM_DEVICE:
+ DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
+ break;
+ default:
+ DCDB->Direction = DAC960_V1_DCDB_IllegalDataTransfer;
+ break;
+ }
+ DCDB->EarlyStatus = false;
+ if (scmd->request->timeout <= 10)
+ DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
+ else if (scmd->request->timeout <= 60)
+ DCDB->Timeout = DAC960_V1_DCDB_Timeout_60_seconds;
+ else if (scmd->request->timeout <= 600)
+ DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_minutes;
+ else
+ DCDB->Timeout = DAC960_V1_DCDB_Timeout_24_hours;
+ DCDB->NoAutomaticRequestSense = false;
+ DCDB->DisconnectPermitted = true;
+ sgl = scsi_sglist(scmd);
+ DCDB->BusAddress = sg_dma_address(sgl);
+ if (sg_dma_len(sgl) > USHRT_MAX) {
+ DCDB->TransferLength = sg_dma_len(sgl) & 0xffff;
+ DCDB->TransferLengthHigh4 = sg_dma_len(sgl) >> 16;
+ } else {
+ DCDB->TransferLength = sg_dma_len(sgl);
+ DCDB->TransferLengthHigh4 = 0;
+ }
+ DCDB->CDBLength = scmd->cmd_len;
+ DCDB->SenseLength = sizeof(DCDB->SenseData);
+ memcpy(&DCDB->CDB, scmd->cmnd, scmd->cmd_len);
+
+ spin_lock_irqsave(&c->queue_lock, flags);
+ c->V1.QueueCommand(c, cmd_blk);
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+ return 0;
+}
+
+static void mylex_v1_inquiry(DAC960_Controller_T *c,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char inq[36] = {
+ 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
+ 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ };
+
+ if (c->V1.BusWidth > 16)
+ inq[7] |= 1 << 6;
+ if (c->V1.BusWidth > 8)
+ inq[7] |= 1 << 5;
+ memcpy(&inq[16], c->ModelName, 16);
+ memcpy(&inq[32], c->FirmwareVersion, 4);
+
+ scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
+}
+
+static void
+mylex_v1_mode_sense(DAC960_Controller_T *c,
+ struct scsi_cmnd *scmd,
+ DAC960_V1_LogicalDeviceInfo_T *ldev_info)
+{
+ unsigned char modes[32], *mode_pg;
+ bool dbd;
+ size_t mode_len;
+
+ dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+ if (dbd) {
+ mode_len = 24;
+ mode_pg = &modes[4];
+ } else {
+ mode_len = 32;
+ mode_pg = &modes[12];
+ }
+ memset(modes, 0, sizeof(modes));
+ modes[0] = mode_len - 1;
+ if (!dbd) {
+ unsigned char *block_desc = &modes[4];
+ modes[3] = 8;
+ put_unaligned_be32(ldev_info->Size, &block_desc[0]);
+ put_unaligned_be32(c->V1.LogicalBlockSize, &block_desc[5]);
+ }
+ mode_pg[0] = 0x08;
+ mode_pg[1] = 0x12;
+ if (ldev_info->WriteBack)
+ mode_pg[2] |= 0x04;
+ if (c->V1.SegmentSize) {
+ mode_pg[2] |= 0x08;
+ put_unaligned_be16(c->V1.SegmentSize, &mode_pg[14]);
+ }
+
+ scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
/*
 * Emulate REQUEST SENSE for a V1 logical drive: synthesize a fixed-format
 * NO SENSE response in the command's sense buffer and copy it into the
 * command's data buffer.
 */
static void mylex_v1_request_sense(DAC960_Controller_T *c,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense_buffer(0, scmd->sense_buffer,
				NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}
+
/*
 * Emulate READ CAPACITY(10) for a V1 logical drive: return the last
 * logical block address (Size - 1) and the logical block size, both
 * big-endian per the SCSI spec.
 */
static void
mylex_v1_read_capacity(DAC960_Controller_T *c,
		       struct scsi_cmnd *scmd,
		       DAC960_V1_LogicalDeviceInfo_T *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->Size, c->V1.LogicalBlockSize);
	/* READ CAPACITY reports the LAST addressable block, hence -1. */
	put_unaligned_be32(ldev_info->Size - 1, &data[0]);
	put_unaligned_be32(c->V1.LogicalBlockSize, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}
+
+static int mylex_v1_ldev_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+ DAC960_V1_CommandBlock_T *cmd_blk = scsi_cmd_priv(scmd);
+ DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+ DAC960_V1_LogicalDeviceInfo_T *ldev_info;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ u64 lba;
+ u32 block_cnt;
+ int nsge;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info || ldev_info->State != DAC960_V1_Device_Online) {
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ switch (scmd->cmnd[0]) {
+ case TEST_UNIT_READY:
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case INQUIRY:
+ if (scmd->cmnd[1] & 1) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ mylex_v1_inquiry(c, scmd);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ break;
+ case MODE_SENSE:
+ if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+ (scmd->cmnd[2] & 0x3F) != 0x08) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ mylex_v1_mode_sense(c, scmd, ldev_info);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ break;
+ case READ_CAPACITY:
+ if ((scmd->cmnd[1] & 1) ||
+ (scmd->cmnd[8] & 1)) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ if (lba) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ mylex_v1_read_capacity(c, scmd, ldev_info);
+ scmd->scsi_done(scmd);
+ return 0;
+ case REQUEST_SENSE:
+ mylex_v1_request_sense(c, scmd);
+ scmd->result = (DID_OK << 16);
+ return 0;
+ break;
+ case SEND_DIAGNOSTIC:
+ if (scmd->cmnd[1] != 0x04) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ /* Assume good status */
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ break;
+ case READ_6:
+ case WRITE_6:
+ lba = (((scmd->cmnd[1] & 0x1F) << 16) |
+ (scmd->cmnd[2] << 8) |
+ scmd->cmnd[3]);
+ block_cnt = scmd->cmnd[4];
+ break;
+ case READ_10:
+ case WRITE_10:
+ case VERIFY: /* 0x2F */
+ case WRITE_VERIFY: /* 0x2E */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
+ break;
+ case READ_12:
+ case WRITE_12:
+ case VERIFY_12: /* 0xAF */
+ case WRITE_VERIFY_12: /* 0xAE */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ break;
+ default:
+ /* Illegal request, invalid opcode */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x20, 0);
+ scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ DAC960_V1_ClearCommand(cmd_blk);
+ mbox->Type5.id = scmd->request->tag + 3;
+ if (scmd->sc_data_direction == DMA_NONE)
+ goto submit;
+ nsge = scsi_dma_map(scmd);
+ if (nsge == 1) {
+ sgl = scsi_sglist(scmd);
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->Type5.opcode = DAC960_V1_Read;
+ else
+ mbox->Type5.opcode = DAC960_V1_Write;
+
+ mbox->Type5.LD.TransferLength = block_cnt;
+ mbox->Type5.LD.LogicalDriveNumber = sdev->id;
+ mbox->Type5.LogicalBlockAddress = lba;
+ mbox->Type5.BusAddress = (u32)sg_dma_address(sgl);
+ } else {
+ DAC960_V1_ScatterGatherSegment_T *hw_sgl;
+ dma_addr_t hw_sgl_addr;
+ int i;
+
+ hw_sgl = pci_pool_alloc(c->ScatterGatherPool,
+ GFP_ATOMIC, &hw_sgl_addr);
+ if (!hw_sgl)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ cmd_blk->sgl = hw_sgl;
+ cmd_blk->sgl_addr = hw_sgl_addr;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->Type5.opcode = DAC960_V1_ReadWithScatterGather;
+ else
+ mbox->Type5.opcode = DAC960_V1_WriteWithScatterGather;
+
+ mbox->Type5.LD.TransferLength = block_cnt;
+ mbox->Type5.LD.LogicalDriveNumber = sdev->id;
+ mbox->Type5.LogicalBlockAddress = lba;
+ mbox->Type5.BusAddress = hw_sgl_addr;
+ mbox->Type5.ScatterGatherCount = nsge;
+
+ scsi_for_each_sg(scmd, sgl, nsge, i) {
+ hw_sgl->SegmentDataPointer = (u32)sg_dma_address(sgl);
+ hw_sgl->SegmentByteCount = (u32)sg_dma_len(sgl);
+ hw_sgl++;
+ }
+ }
+submit:
+ spin_lock_irqsave(&c->queue_lock, flags);
+ c->V1.QueueCommand(c, cmd_blk);
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+
+ return 0;
+}
+
+static int mylex_v1_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ DAC960_Controller_T *c =
+ (DAC960_Controller_T *)shost->hostdata;
+ struct scsi_device *sdev = scmd->device;
+
+ if (sdev->channel > c->host->max_channel) {
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ if (sdev->channel >= c->PhysicalChannelCount)
+ return mylex_v1_ldev_queuecommand(shost, scmd);
+
+ return mylex_v1_pthru_queuecommand(shost, scmd);
+}
+
+static int mylex_v1_slave_alloc(struct scsi_device *sdev)
+{
+ DAC960_Controller_T *c =
+ (DAC960_Controller_T *)sdev->host->hostdata;
+ unsigned short status;
+
+ if (sdev->channel > c->host->max_channel)
+ return -ENXIO;
+
+ if (sdev->lun > 0)
+ return -ENXIO;
+
+ if (sdev->channel >= c->PhysicalChannelCount) {
+ DAC960_V1_LogicalDeviceInfo_T *ldev_info;
+ unsigned short ldev_num;
+
+ ldev_num = mylex_translate_ldev(c, sdev);
+ ldev_info = c->V1.LogicalDeviceInfo[ldev_num];
+ if (ldev_info) {
+ enum raid_level level;
+
+ sdev->hostdata = kzalloc(sizeof(*ldev_info),
+ GFP_KERNEL);
+ if (!sdev->hostdata)
+ return -ENOMEM;
+ memcpy(sdev->hostdata, ldev_info,
+ sizeof(*ldev_info));
+ switch (ldev_info->RAIDLevel) {
+ case DAC960_V1_RAID_Level0:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case DAC960_V1_RAID_Level1:
+ level = RAID_LEVEL_1;
+ break;
+ case DAC960_V1_RAID_Level3:
+ level = RAID_LEVEL_3;
+ break;
+ case DAC960_V1_RAID_Level5:
+ level = RAID_LEVEL_5;
+ break;
+ case DAC960_V1_RAID_Level6:
+ level = RAID_LEVEL_6;
+ break;
+ case DAC960_V1_RAID_JBOD:
+ level = RAID_LEVEL_JBOD;
+ break;
+ default:
+ level = RAID_LEVEL_UNKNOWN;
+ break;
+ }
+ raid_set_level(mylex_v1_raid_template,
+ &sdev->sdev_gendev, level);
+ }
+ return 0;
+ }
+
+ status = DAC960_V1_ExecuteType3D(c, DAC960_V1_GetDeviceState, sdev);
+ if (status != DAC960_V1_NormalCompletion) {
+ dev_dbg(&sdev->sdev_gendev,
+ "Failed to get device state, status %x\n", status);
+ }
+ return 0;
+}
+
+int mylex_v1_slave_configure(struct scsi_device *sdev)
+{
+ DAC960_Controller_T *c =
+ (DAC960_Controller_T *)sdev->host->hostdata;
+ DAC960_V1_LogicalDeviceInfo_T *ldev_info;
+
+ if (sdev->channel > c->host->max_id)
+ return -ENXIO;
+
+ if (sdev->channel < c->PhysicalChannelCount) {
+ sdev->no_uld_attach = 1;
+ return 0;
+ }
+ if (sdev->lun != 0)
+ return -ENXIO;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ if (ldev_info->State != DAC960_V1_Device_Online)
+ sdev_printk(KERN_INFO, sdev,
+ "Logical drive is %s\n",
+ DAC960_V1_DriveStateName(ldev_info->State));
+
+ sdev->tagged_supported = 1;
+ return 0;
+}
+
+static void mylex_v1_slave_destroy(struct scsi_device *sdev)
+{
+ void *hostdata = sdev->hostdata;
+
+ if (hostdata) {
+ kfree(hostdata);
+ sdev->hostdata = NULL;
+ }
+}
+
+static ssize_t mylex_v1_show_dev_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= c->PhysicalChannelCount) {
+ DAC960_V1_LogicalDeviceInfo_T *ldev_info =
+ sdev->hostdata;
+ const char *name;
+
+ name = DAC960_V1_DriveStateName(ldev_info->State);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->State);
+ } else {
+ DAC960_V1_DeviceState_T *pdev_info = sdev->hostdata;
+ unsigned short status;
+ const char *name;
+
+ status = DAC960_V1_ExecuteType3D(c, DAC960_V1_GetDeviceState,
+ sdev);
+ if (status != DAC960_V1_NormalCompletion)
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device state, status %x\n",
+ status);
+
+ if (!pdev_info->Present)
+ name = "Removed";
+ else
+ name = DAC960_V1_DriveStateName(pdev_info->State);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->State);
+ }
+ return ret;
+}
+
/*
 * sysfs "raid_state" store handler for V1 controllers.  Accepts
 * "kill"/"offline", "online" or "standby" and issues a Set Device
 * State command for the (physical) device.  Returns the byte count on
 * success or a negative errno mapped from the controller status.
 */
static ssize_t mylex_v1_store_dev_state(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
	DAC960_V1_DeviceState_T *pdev_info;
	DAC960_V1_DriveState_T new_state;
	unsigned short status;

	/* Prefix match; "kill" is an alias for "offline". */
	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = DAC960_V1_Device_Dead;
	else if (!strncmp(buf, "online", 6))
		new_state = DAC960_V1_Device_Online;
	else if (!strncmp(buf, "standby", 7))
		new_state = DAC960_V1_Device_Standby;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->Present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	/* Already in the requested state: nothing to do. */
	if (pdev_info->State == new_state)
		return count;

	status = DAC960_V1_SetDeviceState(c, sdev, new_state);
	/* Map the controller status to an errno for sysfs. */
	switch (status) {
	case DAC960_V1_NormalCompletion:
		break;
	case DAC960_V1_UnableToStartDevice:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case DAC960_V1_NoDeviceAtAddress:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case DAC960_V1_InvalidChannelOrTargetOrModifier:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case DAC960_V1_ChannelBusy:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
+
+static ssize_t mylex_v2_show_dev_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= c->PhysicalChannelCount) {
+ DAC960_V2_LogicalDeviceInfo_T *ldev_info = sdev->hostdata;
+ const char *name;
+
+ name = DAC960_V2_DriveStateName(ldev_info->State);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->State);
+ } else {
+ DAC960_V2_PhysicalDeviceInfo_T *pdev_info;
+ const char *name;
+
+ pdev_info = sdev->hostdata;
+ name = DAC960_V2_DriveStateName(pdev_info->State);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->State);
+ }
+ return ret;
+}
+
/*
 * sysfs "raid_state" store handler for V2 controllers.  Accepts
 * "offline"/"kill", "online" or "standby", translates the sdev to a
 * logical device number and issues a Set Device State IOCTL via the
 * shared direct-command block.  On success the cached state is updated
 * and the byte count returned; otherwise a negative errno.
 */
static ssize_t mylex_v2_store_dev_state(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
	DAC960_V2_CommandBlock_T *cmd_blk;
	DAC960_V2_CommandMailbox_T *mbox;
	DAC960_V2_DriveState_T new_state;
	unsigned short ldev_num;
	unsigned char status;

	/* Prefix match; "kill" is an alias for "offline". */
	if (!strncmp(buf, "offline", 7) ||
	    !strncmp(buf, "kill", 4))
		new_state = DAC960_V2_Device_Offline;
	else if (!strncmp(buf, "online", 6))
		new_state = DAC960_V2_Device_Online;
	else if (!strncmp(buf, "standby", 7))
		new_state = DAC960_V2_Device_Standby;
	else
		return -EINVAL;

	if (sdev->channel < c->PhysicalChannelCount) {
		DAC960_V2_PhysicalDeviceInfo_T *pdev_info = sdev->hostdata;

		if (pdev_info->State == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    DAC960_V2_DriveStateName(new_state));
			return count;
		}
		/* Physical devices are addressed by their logical number. */
		status = DAC960_V2_TranslatePhysicalDevice(c, sdev->channel,
							   sdev->id, sdev->lun,
							   &ldev_num);
		if (status != DAC960_V2_NormalCompletion)
			return -ENXIO;
	} else {
		DAC960_V2_LogicalDeviceInfo_T *ldev_info = sdev->hostdata;

		if (ldev_info->State == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    DAC960_V2_DriveStateName(new_state));
			return count;
		}
		ldev_num = ldev_info->LogicalDeviceNumber;
	}
	/* dcmd_mutex serializes use of the shared DirectCommandBlock. */
	mutex_lock(&c->V2.dcmd_mutex);
	cmd_blk = &c->V2.DirectCommandBlock;
	DAC960_V2_ClearCommand(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->Common.opcode = DAC960_V2_IOCTL;
	mbox->Common.id = DAC960_DirectCommandIdentifier;
	mbox->Common.control.DataTransferControllerToHost = true;
	mbox->Common.control.NoAutoRequestSense = true;
	mbox->SetDeviceState.IOCTL_Opcode = DAC960_V2_SetDeviceState;
	mbox->SetDeviceState.State = new_state;
	mbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber = ldev_num;
	DAC960_V2_ExecuteCommand(c, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&c->V2.dcmd_mutex);
	if (status == DAC960_V2_NormalCompletion) {
		/* Keep the cached state in sync with the hardware. */
		if (sdev->channel < c->PhysicalChannelCount) {
			DAC960_V2_PhysicalDeviceInfo_T *pdev_info =
				sdev->hostdata;

			pdev_info->State = new_state;
		} else {
			DAC960_V2_LogicalDeviceInfo_T *ldev_info =
				sdev->hostdata;

			ldev_info->State = new_state;
		}
		sdev_printk(KERN_INFO, sdev,
			    "Set device state to %s\n",
			    DAC960_V2_DriveStateName(new_state));
		return count;
	}
	sdev_printk(KERN_INFO, sdev,
		    "Failed to set device state to %s, status 0x%02x\n",
		    DAC960_V2_DriveStateName(new_state),
		    status);
	return -EINVAL;
}
+
+/* Dispatch the raid_state show method by firmware generation. */
+static ssize_t mylex_show_dev_state(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_show_dev_state(dev, attr, buf) :
+		mylex_v2_show_dev_state(dev, attr, buf);
+}
+
+/* Dispatch the raid_state store method by firmware generation. */
+static ssize_t mylex_store_dev_state(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_store_dev_state(dev, attr, buf, count) :
+		mylex_v2_store_dev_state(dev, attr, buf, count);
+}
+
+static DEVICE_ATTR(raid_state, S_IRUGO | S_IWUSR, mylex_show_dev_state,
+		   mylex_store_dev_state);
+
+/*
+ * Show the RAID level of a V1 device via sysfs.
+ * Physical drives have no RAID level and are reported as such.
+ */
+static ssize_t mylex_v1_show_dev_level(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	if (sdev->channel >= c->PhysicalChannelCount) {
+		DAC960_V1_LogicalDeviceInfo_T *ldev_info = sdev->hostdata;
+		const char *name;
+
+		if (!ldev_info)
+			return -ENXIO;
+
+		name = DAC960_V1_RAIDLevelName(ldev_info->RAIDLevel);
+		if (!name)
+			/* Report the unknown RAID level, not the state */
+			return snprintf(buf, 32, "Invalid (%02X)\n",
+					ldev_info->RAIDLevel);
+		return snprintf(buf, 32, "%s\n", name);
+	}
+	return snprintf(buf, 32, "Physical Drive\n");
+}
+
+/*
+ * Show the RAID level of a V2 device via sysfs.
+ * Physical drives are reported with the pseudo level RAID_Physical.
+ */
+static ssize_t mylex_v2_show_dev_level(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	const char *name = NULL;
+
+	if (!sdev->hostdata)
+		return snprintf(buf, 16, "Unknown\n");
+
+	if (sdev->channel >= c->PhysicalChannelCount) {
+		DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+
+		ldev_info = sdev->hostdata;
+		name = DAC960_V2_RAIDLevelName(ldev_info->RAIDLevel);
+		if (!name)
+			/* Report the unknown RAID level, not the state */
+			return snprintf(buf, 32, "Invalid (%02X)\n",
+					ldev_info->RAIDLevel);
+
+	} else
+		name = DAC960_V2_RAIDLevelName(DAC960_V2_RAID_Physical);
+
+	return snprintf(buf, 32, "%s\n", name);
+}
+
+/* Dispatch the raid_level show method by firmware generation. */
+static ssize_t mylex_show_dev_level(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_show_dev_level(dev, attr, buf) :
+		mylex_v2_show_dev_level(dev, attr, buf);
+}
+static DEVICE_ATTR(raid_level, S_IRUGO, mylex_show_dev_level, NULL);
+
+static ssize_t mylex_show_dev_rebuild(struct device *,
+ struct device_attribute *, char *);
+static ssize_t mylex_store_dev_rebuild(struct device *,
+ struct device_attribute *,
+ const char *, size_t);
+static DEVICE_ATTR(rebuild, S_IRUGO | S_IWUSR, mylex_show_dev_rebuild,
+ mylex_store_dev_rebuild);
+
+/*
+ * Show rebuild/consistency-check progress of a V1 logical drive.
+ * The same handler backs both the "rebuild" and "consistency_check"
+ * attributes; @attr distinguishes them.
+ */
+static ssize_t mylex_v1_show_dev_rebuild(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V1_CommandBlock_T *cmd_blk = &c->V1.MonitoringCommandBlock;
+	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	unsigned short ldev_num = 0xffff;
+	unsigned char status;
+	bool rebuild = false;
+	/* Initialized: only overwritten when the command succeeds */
+	ssize_t ldev_size = 0, remaining = 0;
+
+	if (sdev->channel < c->PhysicalChannelCount)
+		return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+	if (attr == &dev_attr_rebuild)
+		rebuild = true;
+
+	mutex_lock(&c->V1.dcmd_mutex);
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3.id = DAC960_MonitoringIdentifier;
+	mbox->Type3.opcode = DAC960_V1_GetRebuildProgress;
+	mbox->Type3.BusAddress = c->V1.RebuildProgressDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V1_NormalCompletion) {
+		ldev_num = c->V1.RebuildProgress->LogicalDriveNumber;
+		ldev_size = c->V1.RebuildProgress->LogicalDriveSize;
+		remaining = c->V1.RebuildProgress->RemainingBlocks;
+	}
+	mutex_unlock(&c->V1.dcmd_mutex);
+
+	if (ldev_num != mylex_translate_ldev(c, sdev) ||
+	    status != DAC960_V1_NormalCompletion)
+		return snprintf(buf, 32, "not %s\n",
+				rebuild ? "rebuilding" : "checking");
+
+	if (c->V1.Enquiry.RebuildFlag == DAC960_V1_BackgroundCheckInProgress &&
+	    rebuild)
+		return snprintf(buf, 32, "not rebuilding\n");
+	else if (!rebuild &&
+		 c->V1.Enquiry.RebuildFlag ==
+		 DAC960_V1_BackgroundRebuildInProgress)
+		return snprintf(buf, 32, "not checking\n");
+
+	/* %zd: ldev_size and remaining are signed ssize_t */
+	return snprintf(buf, 32, "%s block %zd of %zd\n",
+			rebuild ? "rebuilding" : "checking",
+			ldev_size - remaining, ldev_size);
+}
+
+/*
+ * Show rebuild progress of a V2 logical device.
+ * Refreshes the cached logical device information first.
+ */
+static ssize_t mylex_v2_show_dev_rebuild(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+	unsigned short ldev_num;
+	unsigned char status;
+
+	if (sdev->channel < c->PhysicalChannelCount)
+		return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	ldev_num = ldev_info->LogicalDeviceNumber;
+	status = DAC960_V2_NewLogicalDeviceInfo(c, ldev_num, ldev_info);
+	/* Previously the status was ignored and stale info reported */
+	if (status != DAC960_V2_NormalCompletion) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+	if (ldev_info->RebuildInProgress) {
+		return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+				(size_t)ldev_info->RebuildBlockNumber,
+				(size_t)ldev_info->ConfigurableDeviceSize);
+	} else
+		return snprintf(buf, 32, "not rebuilding\n");
+}
+
+/*
+ * Start (buf = "1") or cancel (buf = "0") a rebuild or consistency check
+ * on a V1 controller.  Backs both the "rebuild" (physical drives) and
+ * "consistency_check" (logical drives) attributes; @attr distinguishes
+ * them.
+ */
+static ssize_t mylex_v1_store_dev_rebuild(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V1_CommandBlock_T *cmd_blk;
+	DAC960_V1_CommandMailbox_T *mbox;
+	char tmpbuf[8];
+	ssize_t len;
+	unsigned short ldev_num = 0xFFFF;
+	unsigned short status;
+	int start;
+	bool rebuild = false;
+	const char *msg;
+
+	if (attr == &dev_attr_rebuild)
+		rebuild = true;
+
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &start) != 1)
+		return -EINVAL;
+
+	/*
+	 * Rebuild operates on physical drives, consistency checking on
+	 * logical drives; reject requests for the wrong device class.
+	 * (The original condition also rejected every physical device,
+	 * so a rebuild could never be started at all.)
+	 */
+	if (rebuild && sdev->channel >= c->PhysicalChannelCount)
+		return -ENXIO;
+	if (!rebuild && sdev->channel < c->PhysicalChannelCount)
+		return -ENXIO;
+
+	/*
+	 * Query the current rebuild progress to learn whether an
+	 * operation is active.  cmd_blk/mbox were previously used here
+	 * without ever being initialized.
+	 */
+	mutex_lock(&c->V1.dcmd_mutex);
+	cmd_blk = &c->V1.MonitoringCommandBlock;
+	mbox = &cmd_blk->mbox;
+	DAC960_V1_ClearCommand(cmd_blk);
+	mbox->Type3.id = DAC960_MonitoringIdentifier;
+	mbox->Type3.opcode = DAC960_V1_GetRebuildProgress;
+	mbox->Type3.BusAddress = c->V1.RebuildProgressDMA;
+	DAC960_V1_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	if (status == DAC960_V1_NormalCompletion)
+		ldev_num = c->V1.RebuildProgress->LogicalDriveNumber;
+	mutex_unlock(&c->V1.dcmd_mutex);
+
+	if (start) {
+		/*
+		 * GetRebuildProgress succeeds only while a rebuild or
+		 * check is running, so success means one is already
+		 * active (the original test was inverted).
+		 */
+		if (status == DAC960_V1_NormalCompletion) {
+			sdev_printk(KERN_INFO, sdev,
+				    "%s Not Initiated; already in progress\n",
+				    rebuild ? "Rebuild" : "Check Consistency");
+			return -EALREADY;
+		}
+		mutex_lock(&c->V1.dcmd_mutex);
+		cmd_blk = &c->V1.DirectCommandBlock;
+		DAC960_V1_ClearCommand(cmd_blk);
+		mbox = &cmd_blk->mbox;
+		if (rebuild) {
+			mbox->Type3D.opcode = DAC960_V1_RebuildAsync;
+			mbox->Type3D.id = DAC960_DirectCommandIdentifier;
+			mbox->Type3D.Channel = sdev->channel;
+			mbox->Type3D.TargetID = sdev->id;
+		} else {
+			ldev_num = mylex_translate_ldev(c, sdev);
+			mbox->Type3C.opcode = DAC960_V1_CheckConsistencyAsync;
+			mbox->Type3C.id = DAC960_DirectCommandIdentifier;
+			mbox->Type3C.LogicalDriveNumber = ldev_num;
+			mbox->Type3C.AutoRestore = true;
+		}
+		DAC960_V1_ExecuteCommand(c, cmd_blk);
+		status = cmd_blk->status;
+		mutex_unlock(&c->V1.dcmd_mutex);
+	} else {
+		struct pci_dev *pdev = c->PCIDevice;
+		unsigned char *rate;
+		dma_addr_t rate_addr;
+
+		if (ldev_num != mylex_translate_ldev(c, sdev)) {
+			sdev_printk(KERN_INFO, sdev,
+				    "%s Not Cancelled; not in progress\n",
+				    rebuild ? "Rebuild" : "Check Consistency");
+			return 0;
+		}
+		rate = pci_alloc_consistent(pdev, sizeof(char), &rate_addr);
+		if (rate == NULL) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Cancellation of %s Failed - "
+				    "Out of Memory\n",
+				    rebuild ? "Rebuild" : "Check Consistency");
+			return -ENOMEM;
+		}
+		mutex_lock(&c->V1.dcmd_mutex);
+		cmd_blk = &c->V1.DirectCommandBlock;
+		DAC960_V1_ClearCommand(cmd_blk);
+		mbox = &cmd_blk->mbox;
+		/* Rate constant 0xFF cancels the running operation */
+		mbox->Type3R.opcode = DAC960_V1_RebuildControl;
+		mbox->Type3R.id = DAC960_DirectCommandIdentifier;
+		mbox->Type3R.RebuildRateConstant = 0xFF;
+		mbox->Type3R.BusAddress = rate_addr;
+		DAC960_V1_ExecuteCommand(c, cmd_blk);
+		status = cmd_blk->status;
+		pci_free_consistent(pdev, sizeof(char), rate, rate_addr);
+		mutex_unlock(&c->V1.dcmd_mutex);
+	}
+	if (status == DAC960_V1_NormalCompletion) {
+		sdev_printk(KERN_INFO, sdev, "%s %s\n",
+			    rebuild ? "Rebuild" : "Check Consistency",
+			    start ? "Initiated" : "Cancelled");
+		return count;
+	}
+	if (!start) {
+		sdev_printk(KERN_INFO, sdev,
+			    "%s Not Cancelled, status 0x%x\n",
+			    rebuild ? "Rebuild" : "Check Consistency",
+			    status);
+		return -EIO;
+	}
+
+	/* Decode the controller's failure status for the start case */
+	switch (status) {
+	case DAC960_V1_AttemptToRebuildOnlineDrive:
+		if (rebuild)
+			msg = "Attempt to Rebuild Online or Unresponsive Drive";
+		else
+			msg = "Dependent Physical Device is DEAD";
+		sdev_printk(KERN_INFO, sdev,
+			    "%s Failed - %s\n",
+			    rebuild ? "Rebuild" : "Check Consistency", msg);
+		break;
+	case DAC960_V1_NewDiskFailedDuringRebuild:
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Failed - "
+			    "New Disk Failed During Rebuild\n");
+		break;
+	case DAC960_V1_InvalidDeviceAddress:
+		if (rebuild)
+			msg = "Invalid Device Address";
+		else
+			msg = "Invalid or Nonredundant Logical Drive";
+		sdev_printk(KERN_INFO, sdev,
+			    "%s Failed - %s\n",
+			    rebuild ? "Rebuild" : "Check Consistency", msg);
+		break;
+	case DAC960_V1_RebuildOrCheckAlreadyInProgress:
+		sdev_printk(KERN_INFO, sdev,
+			    "%s Failed - Already in Progress\n",
+			    rebuild ? "Rebuild" : "Check Consistency");
+		break;
+	default:
+		sdev_printk(KERN_INFO, sdev,
+			    "%s Failed, status 0x%x\n",
+			    rebuild ? "Rebuild" : "Check Consistency", status);
+	}
+	return -EIO;
+}
+
+/*
+ * Start (buf = "1") or cancel (buf = "0") a rebuild of a V2 logical
+ * device.
+ *
+ * Returns @count on success, -EALREADY when a rebuild is already
+ * running, -EIO on command failure.
+ */
+static ssize_t mylex_v2_store_dev_rebuild(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+	DAC960_V2_CommandBlock_T *cmd_blk;
+	DAC960_V2_CommandMailbox_T *mbox;
+	char tmpbuf[8];
+	ssize_t len;
+	unsigned short ldev_num;
+	unsigned char status;
+	int rebuild;
+	/* ssize_t, not int: avoid narrowing the size_t count */
+	ssize_t ret = count;
+
+	if (sdev->channel < c->PhysicalChannelCount)
+		return -EINVAL;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	ldev_num = ldev_info->LogicalDeviceNumber;
+
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &rebuild) != 1)
+		return -EINVAL;
+
+	/* Refresh the cached info so RebuildInProgress is current */
+	status = DAC960_V2_NewLogicalDeviceInfo(c, ldev_num, ldev_info);
+	if (status != DAC960_V2_NormalCompletion) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+
+	if (rebuild && ldev_info->RebuildInProgress) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Not Initiated; already in progress\n");
+		return -EALREADY;
+	}
+	if (!rebuild && !ldev_info->RebuildInProgress) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Not Cancelled; no rebuild in progress\n");
+		return ret;
+	}
+
+	mutex_lock(&c->V2.dcmd_mutex);
+	cmd_blk = &c->V2.DirectCommandBlock;
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->Common.opcode = DAC960_V2_IOCTL;
+	mbox->Common.id = DAC960_DirectCommandIdentifier;
+	mbox->Common.control.DataTransferControllerToHost = true;
+	mbox->Common.control.NoAutoRequestSense = true;
+	if (rebuild) {
+		mbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+			ldev_num;
+		mbox->LogicalDeviceInfo.IOCTL_Opcode =
+			DAC960_V2_RebuildDeviceStart;
+	} else {
+		mbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+			ldev_num;
+		mbox->LogicalDeviceInfo.IOCTL_Opcode =
+			DAC960_V2_RebuildDeviceStop;
+	}
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V2.dcmd_mutex);
+	if (status) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Not %s, status 0x%02x\n",
+			    rebuild ? "Initiated" : "Cancelled", status);
+		ret = -EIO;
+	} else
+		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+			    rebuild ? "Initiated" : "Cancelled");
+
+	return ret;
+}
+
+/* Dispatch the rebuild show method by firmware generation. */
+static ssize_t mylex_show_dev_rebuild(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_show_dev_rebuild(dev, attr, buf) :
+		mylex_v2_show_dev_rebuild(dev, attr, buf);
+}
+
+/* Dispatch the rebuild store method by firmware generation. */
+static ssize_t mylex_store_dev_rebuild(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_store_dev_rebuild(dev, attr, buf, count) :
+		mylex_v2_store_dev_rebuild(dev, attr, buf, count);
+}
+
+/*
+ * Show consistency-check progress of a V2 logical device.
+ * Refreshes the cached logical device information first.
+ */
+static ssize_t mylex_v2_show_consistency_check(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+	unsigned short ldev_num;
+	unsigned char status;
+
+	if (sdev->channel < c->PhysicalChannelCount)
+		return snprintf(buf, 32, "physical device - not checking\n");
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	ldev_num = ldev_info->LogicalDeviceNumber;
+	status = DAC960_V2_NewLogicalDeviceInfo(c, ldev_num, ldev_info);
+	/* Previously the status was ignored and stale info reported */
+	if (status != DAC960_V2_NormalCompletion) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+	if (ldev_info->ConsistencyCheckInProgress)
+		return snprintf(buf, 32, "checking block %zu of %zu\n",
+				(size_t)ldev_info->ConsistencyCheckBlockNumber,
+				(size_t)ldev_info->ConfigurableDeviceSize);
+	else
+		return snprintf(buf, 32, "not checking\n");
+}
+
+/*
+ * Start (buf = "1") or cancel (buf = "0") a consistency check of a V2
+ * logical device.
+ *
+ * Returns @count on success, -EALREADY when a check is already running,
+ * -EIO on command failure.
+ */
+static ssize_t mylex_v2_store_consistency_check(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+	DAC960_V2_CommandBlock_T *cmd_blk;
+	DAC960_V2_CommandMailbox_T *mbox;
+	char tmpbuf[8];
+	ssize_t len;
+	unsigned short ldev_num;
+	unsigned char status;
+	int check;
+	/* ssize_t, not int: avoid narrowing the size_t count */
+	ssize_t ret = count;
+
+	if (sdev->channel < c->PhysicalChannelCount)
+		return -EINVAL;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	ldev_num = ldev_info->LogicalDeviceNumber;
+
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &check) != 1)
+		return -EINVAL;
+
+	/* Refresh the cached info so ConsistencyCheckInProgress is current */
+	status = DAC960_V2_NewLogicalDeviceInfo(c, ldev_num, ldev_info);
+	if (status != DAC960_V2_NormalCompletion) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+	if (check && ldev_info->ConsistencyCheckInProgress) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Consistency Check Not Initiated; "
+			    "already in progress\n");
+		return -EALREADY;
+	}
+	if (!check && !ldev_info->ConsistencyCheckInProgress) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Consistency Check Not Cancelled; "
+			    "check not in progress\n");
+		return ret;
+	}
+
+	mutex_lock(&c->V2.dcmd_mutex);
+	cmd_blk = &c->V2.DirectCommandBlock;
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->Common.opcode = DAC960_V2_IOCTL;
+	mbox->Common.id = DAC960_DirectCommandIdentifier;
+	mbox->Common.control.DataTransferControllerToHost = true;
+	mbox->Common.control.NoAutoRequestSense = true;
+	if (check) {
+		mbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+			ldev_num;
+		mbox->ConsistencyCheck.IOCTL_Opcode =
+			DAC960_V2_ConsistencyCheckStart;
+		mbox->ConsistencyCheck.RestoreConsistency = true;
+		mbox->ConsistencyCheck.InitializedAreaOnly = false;
+	} else {
+		mbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+			ldev_num;
+		mbox->ConsistencyCheck.IOCTL_Opcode =
+			DAC960_V2_ConsistencyCheckStop;
+	}
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V2.dcmd_mutex);
+	if (status != DAC960_V2_NormalCompletion) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Consistency Check Not %s, status 0x%02x\n",
+			    check ? "Initiated" : "Cancelled", status);
+		ret = -EIO;
+	} else
+		sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
+			    check ? "Initiated" : "Cancelled");
+
+	return ret;
+}
+
+/*
+ * Dispatch the consistency_check show method; V1 shares the combined
+ * rebuild/check handler.
+ */
+static ssize_t mylex_show_dev_consistency_check(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_show_dev_rebuild(dev, attr, buf) :
+		mylex_v2_show_consistency_check(dev, attr, buf);
+}
+
+/*
+ * Dispatch the consistency_check store method; V1 shares the combined
+ * rebuild/check handler.
+ */
+static ssize_t mylex_store_dev_consistency_check(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_store_dev_rebuild(dev, attr, buf, count) :
+		mylex_v2_store_consistency_check(dev, attr, buf, count);
+}
+static DEVICE_ATTR(consistency_check, S_IRUGO | S_IWUSR,
+		   mylex_show_dev_consistency_check,
+		   mylex_store_dev_consistency_check);
+
+/* Show the controller index assigned at probe time. */
+static ssize_t mylex_show_ctlr_num(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	DAC960_Controller_T *c;
+
+	c = (DAC960_Controller_T *)class_to_shost(dev)->hostdata;
+	return snprintf(buf, 20, "%d\n", c->ControllerNumber);
+}
+static DEVICE_ATTR(mylex_num, S_IRUGO, mylex_show_ctlr_num, NULL);
+
+/* Show the controller firmware version string. */
+static ssize_t mylex_show_firmware_version(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	DAC960_Controller_T *c;
+
+	c = (DAC960_Controller_T *)class_to_shost(dev)->hostdata;
+	return snprintf(buf, 16, "%s\n", c->FirmwareVersion);
+}
+static DEVICE_ATTR(firmware, S_IRUGO, mylex_show_firmware_version, NULL);
+
+/* Flush the write cache of a V1 controller; any write triggers it. */
+static ssize_t mylex_v1_store_flush_cache(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+	unsigned short status;
+
+	status = DAC960_V1_ExecuteType3(c, DAC960_V1_Flush, 0);
+	if (status != DAC960_V1_NormalCompletion) {
+		shost_printk(KERN_INFO, c->host,
+			     "Cache Flush Failed, status %x\n", status);
+		return -EIO;
+	}
+	shost_printk(KERN_INFO, c->host,
+		     "Cache Flush Completed\n");
+	return count;
+}
+
+/* Flush the write cache of a V2 controller; any write triggers it. */
+static ssize_t mylex_v2_store_flush_cache(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+	unsigned char status;
+
+	status = DAC960_V2_DeviceOperation(c, DAC960_V2_PauseDevice,
+					   DAC960_V2_RAID_Controller);
+	if (status == DAC960_V2_NormalCompletion) {
+		shost_printk(KERN_INFO, c->host,
+			     "Cache Flush Completed\n");
+		return count;
+	}
+	/* Fixed typo ("Cashe") and matched the V1 message wording */
+	shost_printk(KERN_INFO, c->host,
+		     "Cache Flush Failed, status 0x%02x\n",
+		     status);
+	return -EIO;
+}
+
+/* Dispatch the flush_cache store method by firmware generation. */
+static ssize_t mylex_store_flush_cache(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+
+	return (c->FirmwareType == DAC960_V1_Controller) ?
+		mylex_v1_store_flush_cache(dev, attr, buf, count) :
+		mylex_v2_store_flush_cache(dev, attr, buf, count);
+}
+static DEVICE_ATTR(flush_cache, S_IWUSR, NULL, mylex_store_flush_cache);
+
+/* Per-device sysfs attributes for V1 controllers. */
+static struct device_attribute *mylex_v1_sdev_attrs[] = {
+	&dev_attr_rebuild,
+	&dev_attr_consistency_check,
+	&dev_attr_raid_state,
+	&dev_attr_raid_level,
+	NULL,
+};
+
+/* Per-host sysfs attributes for V1 controllers. */
+static struct device_attribute *mylex_v1_shost_attrs[] = {
+	&dev_attr_mylex_num,
+	&dev_attr_firmware,
+	&dev_attr_flush_cache,
+	NULL,
+};
+
+/*
+ * SCSI host template for DAC960 V1 firmware controllers.
+ * Non-static: referenced by the probe code elsewhere in this driver.
+ */
+struct scsi_host_template mylex_v1_template = {
+	.module = THIS_MODULE,
+	.name = DAC960_DriverName,
+	.proc_name = "mylex",
+	.queuecommand = mylex_v1_queuecommand,
+	.eh_host_reset_handler = DAC960_host_reset,
+	.slave_alloc = mylex_v1_slave_alloc,
+	.slave_configure = mylex_v1_slave_configure,
+	.slave_destroy = mylex_v1_slave_destroy,
+	/* Per-command private data holds the V1 command block */
+	.cmd_size = sizeof(DAC960_V1_CommandBlock_T),
+	.shost_attrs = mylex_v1_shost_attrs,
+	.sdev_attrs = mylex_v1_sdev_attrs,
+	.this_id = -1,
+};
+
+/*
+ * mylex_v2_queuecommand - queue a SCSI command to a V2 controller
+ *
+ * Builds a SCSI_10 mailbox for CDBs up to 10 bytes, or a SCSI_255
+ * mailbox with a separately allocated DCDB for longer CDBs.  Logical
+ * devices are addressed via their cached channel/target/LUN; physical
+ * devices use the passthrough opcodes.  Up to two scatter/gather
+ * segments fit in the mailbox itself; larger lists are placed in a
+ * pool-allocated external SG table.
+ *
+ * Returns 0, or SCSI_MLQUEUE_HOST_BUSY when a pool allocation fails.
+ */
+static int mylex_v2_queuecommand(struct Scsi_Host *shost,
+		struct scsi_cmnd *scmd)
+{
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+	DAC960_V2_CommandBlock_T *cmd_blk = scsi_cmd_priv(scmd);
+	DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+	struct scsi_device *sdev = scmd->device;
+	DAC960_V2_DataTransferMemoryAddress_T *dma_addr;
+	dma_addr_t sense_addr;
+	struct scatterlist *sgl;
+	unsigned long flags, timeout;
+	int nsge;
+
+	/* No cached device info: device was never successfully probed */
+	if (!scmd->device->hostdata) {
+		scmd->result = (DID_NO_CONNECT << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	/* REPORT LUNS is rejected with ILLEGAL REQUEST / invalid opcode */
+	if (scmd->cmnd[0] == REPORT_LUNS) {
+		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+					0x20, 0x0);
+		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	DAC960_V2_ClearCommand(cmd_blk);
+	cmd_blk->sense = pci_pool_alloc(c->V2.RequestSensePool, GFP_ATOMIC,
+					&sense_addr);
+	if (!cmd_blk->sense)
+		return SCSI_MLQUEUE_HOST_BUSY;
+	cmd_blk->sense_addr = sense_addr;
+
+	/*
+	 * NOTE(review): request->timeout is normally in jiffies; the
+	 * seconds/minutes conversion below looks like it assumes seconds
+	 * — confirm the units.
+	 */
+	timeout = scmd->request->timeout;
+	if (scmd->cmd_len <= 10) {
+		if (scmd->device->channel >= c->PhysicalChannelCount) {
+			DAC960_V2_LogicalDeviceInfo_T *ldev_info =
+				sdev->hostdata;
+
+			mbox->SCSI_10.opcode = DAC960_V2_SCSI_10;
+			mbox->SCSI_10.PhysicalDevice.LogicalUnit =
+				ldev_info->LogicalUnit;
+			mbox->SCSI_10.PhysicalDevice.TargetID =
+				ldev_info->TargetID;
+			mbox->SCSI_10.PhysicalDevice.Channel =
+				ldev_info->Channel;
+			mbox->SCSI_10.PhysicalDevice.Controller = 0;
+		} else {
+			mbox->SCSI_10.opcode =
+				DAC960_V2_SCSI_10_Passthru;
+			mbox->SCSI_10.PhysicalDevice.LogicalUnit = sdev->lun;
+			mbox->SCSI_10.PhysicalDevice.TargetID = sdev->id;
+			mbox->SCSI_10.PhysicalDevice.Channel = sdev->channel;
+		}
+		/* ids 0-2 are reserved for driver-internal commands */
+		mbox->SCSI_10.id = scmd->request->tag + 3;
+		mbox->SCSI_10.control.DataTransferControllerToHost =
+			(scmd->sc_data_direction == DMA_FROM_DEVICE);
+		mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
+		mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
+		mbox->SCSI_10.sense_len = DAC960_V2_SENSE_BUFFERSIZE;
+		mbox->SCSI_10.CDBLength = scmd->cmd_len;
+		if (timeout > 60) {
+			mbox->SCSI_10.tmo.TimeoutScale =
+				DAC960_V2_TimeoutScale_Minutes;
+			mbox->SCSI_10.tmo.TimeoutValue = timeout / 60;
+		} else {
+			mbox->SCSI_10.tmo.TimeoutScale =
+				DAC960_V2_TimeoutScale_Seconds;
+			mbox->SCSI_10.tmo.TimeoutValue = timeout;
+		}
+		memcpy(&mbox->SCSI_10.SCSI_CDB, scmd->cmnd, scmd->cmd_len);
+		dma_addr = &mbox->SCSI_10.dma_addr;
+		cmd_blk->DCDB = NULL;
+	} else {
+		dma_addr_t DCDB_dma;
+
+		/* Long CDBs are passed via a separate DMA-able DCDB */
+		cmd_blk->DCDB = pci_pool_alloc(c->V2.DCDBPool, GFP_ATOMIC,
+					       &DCDB_dma);
+		if (!cmd_blk->DCDB) {
+			pci_pool_free(c->V2.RequestSensePool, cmd_blk->sense,
+				      cmd_blk->sense_addr);
+			cmd_blk->sense = NULL;
+			cmd_blk->sense_addr = 0;
+			return SCSI_MLQUEUE_HOST_BUSY;
+		}
+		cmd_blk->DCDB_dma = DCDB_dma;
+		if (scmd->device->channel >= c->PhysicalChannelCount) {
+			DAC960_V2_LogicalDeviceInfo_T *ldev_info =
+				sdev->hostdata;
+
+			mbox->SCSI_255.opcode = DAC960_V2_SCSI_256;
+			mbox->SCSI_255.PhysicalDevice.LogicalUnit =
+				ldev_info->LogicalUnit;
+			mbox->SCSI_255.PhysicalDevice.TargetID =
+				ldev_info->TargetID;
+			mbox->SCSI_255.PhysicalDevice.Channel =
+				ldev_info->Channel;
+			mbox->SCSI_255.PhysicalDevice.Controller = 0;
+		} else {
+			mbox->SCSI_255.opcode =
+				DAC960_V2_SCSI_255_Passthru;
+			mbox->SCSI_255.PhysicalDevice.LogicalUnit = sdev->lun;
+			mbox->SCSI_255.PhysicalDevice.TargetID = sdev->id;
+			mbox->SCSI_255.PhysicalDevice.Channel = sdev->channel;
+		}
+		mbox->SCSI_255.id = scmd->request->tag + 3;
+		mbox->SCSI_255.control.DataTransferControllerToHost =
+			(scmd->sc_data_direction == DMA_FROM_DEVICE);
+		mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
+		mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
+		mbox->SCSI_255.sense_len = DAC960_V2_SENSE_BUFFERSIZE;
+		mbox->SCSI_255.CDBLength = scmd->cmd_len;
+		mbox->SCSI_255.SCSI_CDB_BusAddress = cmd_blk->DCDB_dma;
+		if (timeout > 60) {
+			mbox->SCSI_255.tmo.TimeoutScale =
+				DAC960_V2_TimeoutScale_Minutes;
+			mbox->SCSI_255.tmo.TimeoutValue = timeout / 60;
+		} else {
+			mbox->SCSI_255.tmo.TimeoutScale =
+				DAC960_V2_TimeoutScale_Seconds;
+			mbox->SCSI_255.tmo.TimeoutValue = timeout;
+		}
+		memcpy(cmd_blk->DCDB, scmd->cmnd, scmd->cmd_len);
+		dma_addr = &mbox->SCSI_255.dma_addr;
+	}
+	if (scmd->sc_data_direction == DMA_NONE)
+		goto submit;
+	nsge = scsi_dma_map(scmd);
+	if (nsge == 1) {
+		/* Single segment fits directly in the mailbox */
+		sgl = scsi_sglist(scmd);
+		dma_addr->ScatterGatherSegments[0].SegmentDataPointer =
+			(u64)sg_dma_address(sgl);
+		dma_addr->ScatterGatherSegments[0].SegmentByteCount =
+			(u64)sg_dma_len(sgl);
+	} else {
+		DAC960_V2_ScatterGatherSegment_T *hw_sgl;
+		dma_addr_t hw_sgl_addr;
+		int i;
+
+		if (nsge > 2) {
+			/* More than two segments need an external SG list */
+			hw_sgl = pci_pool_alloc(c->ScatterGatherPool,
+						GFP_ATOMIC, &hw_sgl_addr);
+			if (WARN_ON(!hw_sgl)) {
+				/* Unwind the sense/DCDB pool allocations */
+				if (cmd_blk->DCDB) {
+					pci_pool_free(c->V2.DCDBPool,
+						      cmd_blk->DCDB,
+						      cmd_blk->DCDB_dma);
+					cmd_blk->DCDB = NULL;
+					cmd_blk->DCDB_dma = 0;
+				}
+				pci_pool_free(c->V2.RequestSensePool,
+					      cmd_blk->sense,
+					      cmd_blk->sense_addr);
+				cmd_blk->sense = NULL;
+				cmd_blk->sense_addr = 0;
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+			cmd_blk->sgl = hw_sgl;
+			cmd_blk->sgl_addr = hw_sgl_addr;
+			if (scmd->cmd_len <= 10)
+				mbox->SCSI_10.control
+					.AdditionalScatterGatherListMemory = true;
+			else
+				mbox->SCSI_255.control
+					.AdditionalScatterGatherListMemory = true;
+			dma_addr->ExtendedScatterGather.ScatterGatherList0Length = nsge;
+			dma_addr->ExtendedScatterGather.ScatterGatherList0Address =
+				cmd_blk->sgl_addr;
+		} else
+			hw_sgl = dma_addr->ScatterGatherSegments;
+
+		scsi_for_each_sg(scmd, sgl, nsge, i) {
+			if (WARN_ON(!hw_sgl)) {
+				scsi_dma_unmap(scmd);
+				scmd->result = (DID_ERROR << 16);
+				scmd->scsi_done(scmd);
+				return 0;
+			}
+			hw_sgl->SegmentDataPointer = (u64)sg_dma_address(sgl);
+			hw_sgl->SegmentByteCount = (u64)sg_dma_len(sgl);
+			hw_sgl++;
+		}
+	}
+submit:
+	spin_lock_irqsave(&c->queue_lock, flags);
+	c->V2.QueueCommand(c, cmd_blk);
+	spin_unlock_irqrestore(&c->queue_lock, flags);
+
+	return 0;
+}
+
+/*
+ * mylex_v2_slave_alloc - attach cached device information to a scsi_device
+ *
+ * Logical devices (channels at or above PhysicalChannelCount) get their
+ * logical device information fetched from the controller and their RAID
+ * level registered with the raid_class template; physical devices get
+ * their physical device information.  The info block is stored in
+ * sdev->hostdata and freed in slave_destroy().
+ *
+ * Returns 0 on success, -ENXIO for nonexistent devices, -ENOMEM on
+ * allocation failure.
+ */
+static int mylex_v2_slave_alloc(struct scsi_device *sdev)
+{
+	DAC960_Controller_T *c =
+		(DAC960_Controller_T *)sdev->host->hostdata;
+	unsigned char status;
+
+	if (sdev->channel > c->host->max_channel)
+		return 0;
+
+	if (sdev->channel >= c->PhysicalChannelCount) {
+		DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+		unsigned short ldev_num;
+
+		/* Logical devices exist only at LUN 0 */
+		if (sdev->lun > 0)
+			return -ENXIO;
+
+		ldev_num = mylex_translate_ldev(c, sdev);
+		if (ldev_num >= c->LogicalDriveCount)
+			return -ENXIO;
+
+		ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
+		if (!ldev_info)
+			return -ENOMEM;
+
+		status = DAC960_V2_NewLogicalDeviceInfo(c, ldev_num,
+							ldev_info);
+		if (status != DAC960_V2_NormalCompletion) {
+			/*
+			 * NOTE(review): this path returns 0 with a NULL
+			 * hostdata, while the physical path below returns
+			 * -ENXIO on failure — confirm the asymmetry is
+			 * intentional.
+			 */
+			sdev->hostdata = NULL;
+			kfree(ldev_info);
+		} else {
+			enum raid_level level;
+
+			dev_dbg(&sdev->sdev_gendev,
+				"Logical device mapping %d:%d:%d -> %d\n",
+				ldev_info->Channel, ldev_info->TargetID,
+				ldev_info->LogicalUnit,
+				ldev_info->LogicalDeviceNumber);
+
+			sdev->hostdata = ldev_info;
+			/* Map firmware RAID levels to raid_class levels */
+			switch (ldev_info->RAIDLevel) {
+			case DAC960_V2_RAID_Level0:
+				level = RAID_LEVEL_LINEAR;
+				break;
+			case DAC960_V2_RAID_Level1:
+				level = RAID_LEVEL_1;
+				break;
+			case DAC960_V2_RAID_Level3:
+			case DAC960_V2_RAID_Level3F:
+			case DAC960_V2_RAID_Level3L:
+				level = RAID_LEVEL_3;
+				break;
+			case DAC960_V2_RAID_Level5:
+			case DAC960_V2_RAID_Level5L:
+				level = RAID_LEVEL_5;
+				break;
+			case DAC960_V2_RAID_Level6:
+				level = RAID_LEVEL_6;
+				break;
+			case DAC960_V2_RAID_LevelE:
+			case DAC960_V2_RAID_NewSpan:
+			case DAC960_V2_RAID_Span:
+				level = RAID_LEVEL_LINEAR;
+				break;
+			case DAC960_V2_RAID_JBOD:
+				level = RAID_LEVEL_JBOD;
+				break;
+			default:
+				level = RAID_LEVEL_UNKNOWN;
+				break;
+			}
+			raid_set_level(mylex_v2_raid_template,
+				       &sdev->sdev_gendev, level);
+			if (ldev_info->State != DAC960_V2_Device_Online) {
+				const char *name;
+
+				name = DAC960_V2_DriveStateName(ldev_info->State);
+				sdev_printk(KERN_DEBUG, sdev,
+					    "logical device in state %s\n",
+					    name ? name : "Invalid");
+			}
+		}
+	} else {
+		DAC960_V2_PhysicalDeviceInfo_T *pdev_info;
+
+		pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
+		if (!pdev_info)
+			return -ENOMEM;
+
+		status = DAC960_V2_NewPhysicalDeviceInfo(c, sdev->channel,
+							 sdev->id, sdev->lun,
+							 pdev_info);
+		if (status != DAC960_V2_NormalCompletion) {
+			sdev->hostdata = NULL;
+			kfree(pdev_info);
+			return -ENXIO;
+		}
+		sdev->hostdata = pdev_info;
+	}
+	return 0;
+}
+
+/*
+ * Configure a V2 scsi_device: hide physical devices from upper-layer
+ * drivers and enable the write-cache default for cached logical devices.
+ */
+static int mylex_v2_slave_configure(struct scsi_device *sdev)
+{
+	DAC960_Controller_T *c =
+		(DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+	unsigned char wce;
+
+	if (sdev->channel > c->host->max_channel)
+		return -ENXIO;
+
+	if (sdev->channel < c->PhysicalChannelCount) {
+		/* Skip HBA device */
+		if (sdev->type == TYPE_RAID)
+			return -ENXIO;
+		sdev->no_uld_attach = 1;
+		return 0;
+	}
+
+	/* Logical devices exist only at LUN 0 */
+	if (sdev->lun != 0)
+		return -ENXIO;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+
+	wce = ldev_info->LogicalDeviceControl.WriteCache;
+	if (wce == DAC960_V2_WriteCacheEnabled ||
+	    wce == DAC960_V2_IntelligentWriteCacheEnabled)
+		sdev->wce_default_on = 1;
+	sdev->tagged_supported = 1;
+	return 0;
+}
+
+/* Free the cached device information attached in slave_alloc(). */
+static void mylex_v2_slave_destroy(struct scsi_device *sdev)
+{
+	/* kfree(NULL) is a no-op, so no guard is needed */
+	kfree(sdev->hostdata);
+	sdev->hostdata = NULL;
+}
+
+/* sysfs attributes attached to every SCSI device exported by this driver */
+static struct device_attribute *mylex_sdev_attrs[] = {
+	&dev_attr_consistency_check,
+	&dev_attr_rebuild,
+	&dev_attr_raid_state,
+	&dev_attr_raid_level,
+	NULL,
+};
+
+/*
+ * mylex_v2_show_ctlr_serial - sysfs 'serial' attribute (read-only)
+ *
+ * The controller serial number is a fixed 16-byte field that is not
+ * guaranteed to be NUL-terminated, so it is copied and terminated
+ * locally before formatting.
+ */
+static ssize_t mylex_v2_show_ctlr_serial(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+	char serial[17];
+
+	memcpy(serial, c->V2.ControllerInformation.ControllerSerialNumber, 16);
+	serial[16] = '\0';
+	/*
+	 * Fix: the original used snprintf(buf, 16, ...), which truncated
+	 * the 16-character serial to 15 characters and dropped the
+	 * newline.  sysfs show buffers are PAGE_SIZE bytes.
+	 */
+	return snprintf(buf, PAGE_SIZE, "%s\n", serial);
+}
+static DEVICE_ATTR(serial, S_IRUGO, mylex_v2_show_ctlr_serial, NULL);
+
+/* Mapping of V2 firmware processor type codes to printable names */
+static struct DAC960_V2_ProcessorTypeTbl {
+	DAC960_V2_ProcessorType_T type;
+	char *name;
+} DAC960_V2_ProcessorTypeNames[] = {
+	{ DAC960_V2_ProcessorType_i960CA, "i960CA" },
+	{ DAC960_V2_ProcessorType_i960RD, "i960RD" },
+	{ DAC960_V2_ProcessorType_i960RN, "i960RN" },
+	{ DAC960_V2_ProcessorType_i960RP, "i960RP" },
+	{ DAC960_V2_ProcessorType_NorthBay, "NorthBay" },
+	{ DAC960_V2_ProcessorType_StrongArm, "StrongARM" },
+	{ DAC960_V2_ProcessorType_i960RM, "i960RM" },
+	{ 0xff, NULL },	/* sentinel: NULL name terminates lookups */
+};
+
+/*
+ * mylex_v2_show_processor - sysfs 'processor' attribute (read-only)
+ *
+ * Formats name, type and CPU count of the (up to two) controller
+ * processors.  A processor whose type code is not found in
+ * DAC960_V2_ProcessorTypeNames is reported as absent.
+ */
+static ssize_t mylex_v2_show_processor(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+	struct DAC960_V2_ProcessorTypeTbl *tbl;
+	const char *first_processor = NULL;
+	const char *second_processor = NULL;
+	DAC960_V2_ControllerInfo_T *info = &c->V2.ControllerInformation;
+	ssize_t ret;
+
+	/* Translate the processor type codes into printable names */
+	if (info->FirstProcessorCount) {
+		for (tbl = DAC960_V2_ProcessorTypeNames; tbl->name; tbl++) {
+			if (tbl->type == info->FirstProcessorType) {
+				first_processor = tbl->name;
+				break;
+			}
+		}
+	}
+	if (info->SecondProcessorCount) {
+		for (tbl = DAC960_V2_ProcessorTypeNames; tbl->name; tbl++) {
+			if (tbl->type == info->SecondProcessorType) {
+				second_processor = tbl->name;
+				break;
+			}
+		}
+	}
+	/*
+	 * Fix: branch on the looked-up names themselves.  The original
+	 * chain tested '!second_processor' first, so with both names
+	 * unresolved it printed a NULL string for processor 1, and its
+	 * final 'both absent' else was unreachable.
+	 */
+	if (first_processor && second_processor)
+		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
+			       "2: %s (%s, %d cpus)\n",
+			       info->FirstProcessorName,
+			       first_processor, info->FirstProcessorCount,
+			       info->SecondProcessorName,
+			       second_processor, info->SecondProcessorCount);
+	else if (first_processor)
+		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
+			       info->FirstProcessorName,
+			       first_processor, info->FirstProcessorCount);
+	else if (second_processor)
+		ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
+			       info->SecondProcessorName,
+			       second_processor, info->SecondProcessorCount);
+	else
+		ret = snprintf(buf, 64, "1: absent\n2: absent\n");
+
+	return ret;
+}
+static DEVICE_ATTR(processor, S_IRUGO, mylex_v2_show_processor, NULL);
+
+/*
+ * mylex_v2_store_discovery_command - sysfs 'discovery' attribute (write-only)
+ *
+ * Issues the Start Discovery IOCTL through the shared direct-command
+ * block (serialized by dcmd_mutex), then kicks the monitoring work
+ * item and waits for it to finish so that newly discovered devices
+ * are picked up before returning.  Any written value triggers the
+ * discovery; the value itself is ignored.
+ */
+static ssize_t mylex_v2_store_discovery_command(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+	DAC960_V2_CommandBlock_T *cmd_blk;
+	DAC960_V2_CommandMailbox_T *mbox;
+	unsigned char status;
+
+	/* dcmd_mutex serializes use of the single DirectCommandBlock */
+	mutex_lock(&c->V2.dcmd_mutex);
+	cmd_blk = &c->V2.DirectCommandBlock;
+	DAC960_V2_ClearCommand(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->Common.opcode = DAC960_V2_IOCTL;
+	mbox->Common.id = DAC960_DirectCommandIdentifier;
+	mbox->Common.control.DataTransferControllerToHost = true;
+	mbox->Common.control.NoAutoRequestSense = true;
+	mbox->Common.IOCTL_Opcode = DAC960_V2_StartDiscovery;
+	DAC960_V2_ExecuteCommand(c, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&c->V2.dcmd_mutex);
+	if (status != DAC960_V2_NormalCompletion) {
+		shost_printk(KERN_INFO, c->host,
+			     "Discovery Not Initiated, status %02X\n",
+			     status);
+		return -EINVAL;
+	}
+	shost_printk(KERN_INFO, c->host, "Discovery Initiated\n");
+	/* Restart event processing and force a controller-info refresh */
+	c->V2.NextEventSequenceNumber = 0;
+	c->V2.NeedControllerInformation = true;
+	queue_delayed_work(c->work_q, &c->monitor_work, 1);
+	/* Wait for the monitor pass so discovery results are visible */
+	flush_delayed_work(&c->monitor_work);
+	shost_printk(KERN_INFO, c->host, "Discovery Completed\n");
+
+	return count;
+}
+static DEVICE_ATTR(discovery, S_IWUSR, NULL, mylex_v2_store_discovery_command);
+
+/*
+ * mylex_v2_show_suppress_enclosure_messages - sysfs read side of the
+ * 'disable_enclosure_messages' attribute.  Emits a single digit plus
+ * a newline.
+ */
+static ssize_t mylex_v2_show_suppress_enclosure_messages(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(dev);
+	DAC960_Controller_T *ctlr = (DAC960_Controller_T *)host->hostdata;
+
+	return snprintf(buf, 3, "%d\n", ctlr->SuppressEnclosureMessages);
+}
+
+/*
+ * mylex_v2_store_suppress_enclosure_messages - sysfs write side of the
+ * 'disable_enclosure_messages' attribute.  Accepts 0, 1 or 2.
+ */
+static ssize_t mylex_v2_store_suppress_enclosure_messages(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	/*
+	 * Fix: this is a Scsi_Host attribute (listed in
+	 * mylex_v2_shost_attrs), so 'dev' is the host class device and
+	 * must be resolved with class_to_shost(), as the show side
+	 * already does -- not with to_scsi_device().
+	 */
+	struct Scsi_Host *shost = class_to_shost(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)shost->hostdata;
+	char tmpbuf[8];
+	ssize_t len;
+	int value;
+
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	/* Fix: also reject negative values, not just values above 2 */
+	if (sscanf(tmpbuf, "%d", &value) != 1 || value < 0 || value > 2)
+		return -EINVAL;
+
+	c->SuppressEnclosureMessages = value;
+	return count;
+}
+static DEVICE_ATTR(disable_enclosure_messages, S_IRUGO | S_IWUSR,
+		   mylex_v2_show_suppress_enclosure_messages,
+		   mylex_v2_store_suppress_enclosure_messages);
+
+/* sysfs attributes attached to the Scsi_Host for V2 controllers */
+static struct device_attribute *mylex_v2_shost_attrs[] = {
+	&dev_attr_serial,
+	&dev_attr_mylex_num,
+	&dev_attr_processor,
+	&dev_attr_firmware,
+	&dev_attr_discovery,
+	&dev_attr_flush_cache,
+	&dev_attr_disable_enclosure_messages,
+	NULL,
+};
+
+/* SCSI host template for V2 (AcceleRAID/eXtremeRAID) firmware controllers */
+struct scsi_host_template mylex_v2_template = {
+	.module = THIS_MODULE,
+	.name = DAC960_DriverName,
+	.proc_name = "mylex",
+	.queuecommand = mylex_v2_queuecommand,
+	.eh_host_reset_handler = DAC960_host_reset,
+	.slave_alloc = mylex_v2_slave_alloc,
+	.slave_configure = mylex_v2_slave_configure,
+	.slave_destroy = mylex_v2_slave_destroy,
+	.cmd_size = sizeof(DAC960_V2_CommandBlock_T),
+	.shost_attrs = mylex_v2_shost_attrs,
+	.sdev_attrs = mylex_sdev_attrs,
+	.this_id = -1,
+};
+
+/**
+ * mylex_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ *
+ * Channels at or above the physical channel count carry the logical
+ * (RAID) volumes.
+ */
+static int
+mylex_is_raid(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+
+	if (sdev->channel < c->PhysicalChannelCount)
+		return 0;
+	return 1;
+}
+
+/**
+ * mylex_v1_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ *
+ * Reports 0 unless a rebuild is in progress on this logical drive.
+ */
+static void
+mylex_v1_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	unsigned int percent_complete = 0;
+	unsigned short ldev_num;
+	unsigned int ldev_size = 0, remaining = 0;
+
+	/* Only logical-drive channels carry RAID volumes */
+	if (sdev->channel < c->PhysicalChannelCount)
+		return;
+	if (DAC960_V1_ControllerIsRebuilding(c)) {
+		ldev_num = c->V1.RebuildProgress->LogicalDriveNumber;
+		if (ldev_num == mylex_translate_ldev(c, sdev)) {
+			ldev_size =
+				c->V1.RebuildProgress->LogicalDriveSize;
+			remaining =
+				c->V1.RebuildProgress->RemainingBlocks;
+		}
+	}
+	if (remaining && ldev_size) {
+		/*
+		 * Fix: use 64-bit arithmetic.  The original computed
+		 * (ldev_size - remaining) * 100 in 32 bits, which
+		 * overflows for drives larger than ~42 million blocks.
+		 */
+		u64 complete = (u64)(ldev_size - remaining) * 100;
+
+		do_div(complete, ldev_size);
+		percent_complete = complete;
+	}
+	raid_set_resync(mylex_v1_raid_template, dev, percent_complete);
+}
+
+/**
+ * mylex_v1_get_state - get raid volume status
+ * @dev: the device struct object
+ *
+ * Maps the V1 logical-drive state onto the RAID transport class
+ * states; a controller-wide rebuild reports RESYNCING.
+ */
+static void
+mylex_v1_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V1_LogicalDeviceInfo_T *ldev_info = sdev->hostdata;
+	enum raid_state state;
+
+	if (sdev->channel < c->PhysicalChannelCount || !ldev_info)
+		state = RAID_STATE_UNKNOWN;
+	else if (DAC960_V1_ControllerIsRebuilding(c))
+		state = RAID_STATE_RESYNCING;
+	else if (ldev_info->State == DAC960_V1_Device_Online)
+		state = RAID_STATE_ACTIVE;
+	else if (ldev_info->State == DAC960_V1_Device_WriteOnly ||
+		 ldev_info->State == DAC960_V1_Device_Critical)
+		state = RAID_STATE_DEGRADED;
+	else
+		state = RAID_STATE_OFFLINE;
+
+	raid_set_state(mylex_v1_raid_template, dev, state);
+}
+
+/* RAID transport class callbacks for V1 firmware controllers */
+static struct raid_function_template mylex_v1_raid_functions = {
+	.cookie = &mylex_v1_template,
+	.is_raid = mylex_is_raid,
+	.get_resync = mylex_v1_get_resync,
+	.get_state = mylex_v1_get_state,
+};
+
+/**
+ * mylex_v2_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ *
+ * Refreshes the logical device information and reports rebuild
+ * progress; 0 is reported when no rebuild is active or the refresh
+ * fails.
+ */
+static void
+mylex_v2_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V2_LogicalDeviceInfo_T *ldev_info = sdev->hostdata;
+	u8 percent_complete = 0, status;
+
+	if (sdev->channel < c->PhysicalChannelCount || !ldev_info)
+		return;
+	if (ldev_info->RebuildInProgress) {
+		unsigned short ldev_num = ldev_info->LogicalDeviceNumber;
+
+		status = DAC960_V2_NewLogicalDeviceInfo(c, ldev_num,
+							ldev_info);
+		/*
+		 * Fix: the original ignored the refresh status and
+		 * divided unconditionally, risking use of stale data
+		 * and a divide-by-zero on ConfigurableDeviceSize.
+		 */
+		if (status == DAC960_V2_NormalCompletion &&
+		    ldev_info->ConfigurableDeviceSize)
+			percent_complete = ldev_info->RebuildBlockNumber * 100 /
+				ldev_info->ConfigurableDeviceSize;
+	}
+	raid_set_resync(mylex_v2_raid_template, dev, percent_complete);
+}
+
+/**
+ * mylex_v2_get_state - get raid volume status
+ * @dev: the device struct object
+ *
+ * Maps the V2 logical-device state onto the RAID transport class
+ * states.
+ */
+static void
+mylex_v2_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	DAC960_Controller_T *c = (DAC960_Controller_T *)sdev->host->hostdata;
+	DAC960_V2_LogicalDeviceInfo_T *ldev_info = sdev->hostdata;
+	enum raid_state state;
+
+	if (sdev->channel < c->PhysicalChannelCount || !ldev_info)
+		state = RAID_STATE_UNKNOWN;
+	else if (ldev_info->State == DAC960_V2_Device_Online)
+		state = RAID_STATE_ACTIVE;
+	else if (ldev_info->State == DAC960_V2_Device_SuspectedCritical ||
+		 ldev_info->State == DAC960_V2_Device_Critical)
+		state = RAID_STATE_DEGRADED;
+	else if (ldev_info->State == DAC960_V2_Device_Rebuild)
+		state = RAID_STATE_RESYNCING;
+	else if (ldev_info->State == DAC960_V2_Device_Unconfigured ||
+		 ldev_info->State == DAC960_V2_Device_InvalidState)
+		state = RAID_STATE_UNKNOWN;
+	else
+		state = RAID_STATE_OFFLINE;
+
+	raid_set_state(mylex_v2_raid_template, dev, state);
+}
+
+/* RAID transport class callbacks for V2 firmware controllers */
+static struct raid_function_template mylex_v2_raid_functions = {
+	.cookie = &mylex_v2_template,
+	.is_raid = mylex_is_raid,
+	.get_resync = mylex_v2_get_resync,
+	.get_state = mylex_v2_get_state,
+};
+
+/*
+  DAC960_DetectController detects Mylex DAC960/AcceleRAID/eXtremeRAID
+  PCI RAID Controllers by interrogating the PCI Configuration Space for
+  Controller Type.  It allocates the Scsi_Host, maps the register
+  window, waits for firmware initialization, enables the memory
+  mailbox interface and hooks up the per-hardware-type method
+  pointers.  Returns the controller on success, NULL on failure
+  (DAC960_DetectCleanup releases partially acquired resources).
+*/
+
+static DAC960_Controller_T *
+DAC960_DetectController(struct pci_dev *pdev,
+			const struct pci_device_id *entry)
+{
+	struct DAC960_privdata *privdata =
+		(struct DAC960_privdata *)entry->driver_data;
+	irq_handler_t InterruptHandler = privdata->InterruptHandler;
+	unsigned int MemoryWindowSize = privdata->MemoryWindowSize;
+	struct Scsi_Host *shost;
+	DAC960_Controller_T *c = NULL;
+	unsigned char DeviceFunction = pdev->devfn;
+	unsigned char ErrorStatus, Parameter0, Parameter1;
+	void __iomem *base;
+	int timeout = 0;
+
+	/* V1 and V2 firmware use different host templates */
+	if (privdata->FirmwareType == DAC960_V1_Controller)
+		shost = scsi_host_alloc(&mylex_v1_template,
+					sizeof(DAC960_Controller_T));
+	else
+		shost = scsi_host_alloc(&mylex_v2_template,
+					sizeof(DAC960_Controller_T));
+	if (!shost) {
+		dev_err(&pdev->dev, "Unable to allocate Controller\n");
+		return NULL;
+	}
+	c = (DAC960_Controller_T *)shost->hostdata;
+	c->host = shost;
+	c->ControllerNumber = DAC960_ControllerCount++;
+	c->Bus = pdev->bus->number;
+	c->FirmwareType = privdata->FirmwareType;
+	c->HardwareType = privdata->HardwareType;
+	c->Device = DeviceFunction >> 3;
+	c->Function = DeviceFunction & 0x7;
+	c->PCIDevice = pdev;
+	strcpy(c->FullModelName, "DAC960");
+	shost->max_lun = 256;
+	if (c->FirmwareType == DAC960_V1_Controller) {
+		shost->max_cmd_len = 12;
+		mutex_init(&c->V1.dcmd_mutex);
+		mutex_init(&c->V1.dma_mutex);
+	} else {
+		shost->max_cmd_len = 16;
+		mutex_init(&c->V2.dcmd_mutex);
+		mutex_init(&c->V2.cinfo_mutex);
+	}
+
+	/* Per-controller workqueue used for the monitoring work item */
+	snprintf(c->work_q_name, sizeof(c->work_q_name),
+		 "mylex_wq_%d", shost->host_no);
+	c->work_q = create_singlethread_workqueue(c->work_q_name);
+	if (!c->work_q)
+		goto Failure;
+
+	if (pci_enable_device(pdev))
+		goto Failure;
+
+	/* PD and P controllers are programmed through I/O ports (BAR 0) */
+	switch (c->HardwareType) {
+	case DAC960_PD_Controller:
+	case DAC960_P_Controller:
+		c->IO_Address = pci_resource_start(pdev, 0);
+		c->PCI_Address = pci_resource_start(pdev, 1);
+		break;
+	default:
+		c->PCI_Address = pci_resource_start(pdev, 0);
+		break;
+	}
+
+	pci_set_drvdata(pdev, c);
+	spin_lock_init(&c->queue_lock);
+	/*
+	  Map the Controller Register Window.
+	*/
+	if (MemoryWindowSize < PAGE_SIZE)
+		MemoryWindowSize = PAGE_SIZE;
+	c->MemoryMappedAddress =
+		ioremap_nocache(c->PCI_Address & PAGE_MASK, MemoryWindowSize);
+	c->BaseAddress =
+		c->MemoryMappedAddress + (c->PCI_Address & ~PAGE_MASK);
+	if (c->MemoryMappedAddress == NULL) {
+		dev_err(&pdev->dev,
+			"Unable to map Controller Register Window\n");
+		goto Failure;
+	}
+	base = c->BaseAddress;
+	/*
+	 * Per-hardware-type bring-up: disable interrupts, acknowledge
+	 * any stale mailbox status, poll for firmware initialization
+	 * (reporting firmware error status if one appears), enable the
+	 * memory mailbox interface, re-enable interrupts and install
+	 * the hardware-specific method pointers.
+	 */
+	switch (c->HardwareType) {
+	case DAC960_GEM_Controller:
+		DAC960_GEM_DisableInterrupts(base);
+		DAC960_GEM_AcknowledgeHardwareMailboxStatus(base);
+		udelay(1000);
+		while (DAC960_GEM_InitializationInProgressP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			if (DAC960_GEM_ReadErrorStatus(base, &ErrorStatus,
+						       &Parameter0, &Parameter1) &&
+			    DAC960_ReportErrorStatus(c, ErrorStatus,
+						     Parameter0, Parameter1))
+				goto Failure;
+			udelay(10);
+			timeout++;
+		}
+		if (timeout == DAC960_MAILBOX_TIMEOUT) {
+			dev_err(&pdev->dev,
+				"Timeout waiting for "
+				"Controller Initialisation\n");
+			goto Failure;
+		}
+		if (!DAC960_V2_EnableMemoryMailboxInterface(c)) {
+			dev_err(&pdev->dev,
+				"Unable to Enable Memory Mailbox Interface\n");
+			DAC960_GEM_ControllerReset(base);
+			goto Failure;
+		}
+		DAC960_GEM_EnableInterrupts(base);
+		c->V2.QueueCommand = DAC960_V2_QueueCommand;
+		c->V2.WriteCommandMailbox = DAC960_GEM_WriteCommandMailbox;
+		c->V2.MailboxNewCommand = DAC960_GEM_MemoryMailboxNewCommand;
+		c->ReadControllerConfiguration =
+			DAC960_V2_ReadControllerConfiguration;
+		c->DisableInterrupts = DAC960_GEM_DisableInterrupts;
+		c->Reset = DAC960_GEM_ControllerReset;
+		break;
+	case DAC960_BA_Controller:
+		DAC960_BA_DisableInterrupts(base);
+		DAC960_BA_AcknowledgeHardwareMailboxStatus(base);
+		udelay(1000);
+		while (DAC960_BA_InitializationInProgressP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			if (DAC960_BA_ReadErrorStatus(base, &ErrorStatus,
+						      &Parameter0, &Parameter1) &&
+			    DAC960_ReportErrorStatus(c, ErrorStatus,
+						     Parameter0, Parameter1))
+				goto Failure;
+			udelay(10);
+			timeout++;
+		}
+		if (timeout == DAC960_MAILBOX_TIMEOUT) {
+			dev_err(&pdev->dev, "Timeout waiting "
+				"for Controller Initialisation\n");
+			goto Failure;
+		}
+		if (!DAC960_V2_EnableMemoryMailboxInterface(c)) {
+			dev_err(&pdev->dev,
+				"Unable to Enable Memory Mailbox Interface\n");
+			DAC960_BA_ControllerReset(base);
+			goto Failure;
+		}
+		DAC960_BA_EnableInterrupts(base);
+		c->V2.QueueCommand = DAC960_V2_QueueCommand;
+		c->V2.WriteCommandMailbox = DAC960_BA_WriteCommandMailbox;
+		c->V2.MailboxNewCommand = DAC960_BA_MemoryMailboxNewCommand;
+		c->ReadControllerConfiguration =
+			DAC960_V2_ReadControllerConfiguration;
+		c->DisableInterrupts = DAC960_BA_DisableInterrupts;
+		c->Reset = DAC960_BA_ControllerReset;
+		break;
+	case DAC960_LP_Controller:
+		DAC960_LP_DisableInterrupts(base);
+		DAC960_LP_AcknowledgeHardwareMailboxStatus(base);
+		udelay(1000);
+		while (DAC960_LP_InitializationInProgressP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			if (DAC960_LP_ReadErrorStatus(base, &ErrorStatus,
+						      &Parameter0, &Parameter1) &&
+			    DAC960_ReportErrorStatus(c, ErrorStatus,
+						     Parameter0, Parameter1))
+				goto Failure;
+			udelay(10);
+			timeout++;
+		}
+		if (timeout == DAC960_MAILBOX_TIMEOUT) {
+			dev_err(&pdev->dev, "Timeout waiting "
+				"for Controller Initialisation\n");
+			goto Failure;
+		}
+		if (!DAC960_V2_EnableMemoryMailboxInterface(c)) {
+			dev_err(&pdev->dev,
+				"Unable to Enable Memory Mailbox Interface\n");
+			DAC960_LP_ControllerReset(base);
+			goto Failure;
+		}
+		DAC960_LP_EnableInterrupts(base);
+		c->V2.QueueCommand = DAC960_V2_QueueCommand;
+		c->V2.WriteCommandMailbox = DAC960_LP_WriteCommandMailbox;
+		c->V2.MailboxNewCommand = DAC960_LP_MemoryMailboxNewCommand;
+		c->ReadControllerConfiguration =
+			DAC960_V2_ReadControllerConfiguration;
+		c->DisableInterrupts = DAC960_LP_DisableInterrupts;
+		c->Reset = DAC960_LP_ControllerReset;
+		break;
+	case DAC960_LA_Controller:
+		DAC960_LA_DisableInterrupts(base);
+		/* Drain any stale hardware mailbox status first */
+		timeout = 0;
+		while (DAC960_LA_HardwareMailboxStatusAvailableP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			DAC960_LA_AcknowledgeHardwareMailboxStatus(base);
+			udelay(10);
+			timeout++;
+		}
+		if (DAC960_LA_HardwareMailboxStatusAvailableP(base)) {
+			dev_err(&pdev->dev,
+				"Hardware Mailbox status still not cleared\n");
+			DAC960_LA_ControllerReset(base);
+		} else if (timeout)
+			dev_info(&pdev->dev,
+				 "Hardware Mailbox status cleared, %d attempts\n",
+				 timeout);
+
+		udelay(1000);
+		timeout = 0;
+		while (DAC960_LA_InitializationInProgressP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			if (DAC960_LA_ReadErrorStatus(base, &ErrorStatus,
+						      &Parameter0, &Parameter1) &&
+			    DAC960_ReportErrorStatus(c, ErrorStatus,
+						     Parameter0, Parameter1))
+				goto Failure;
+			udelay(10);
+			timeout++;
+		}
+		if (timeout == DAC960_MAILBOX_TIMEOUT) {
+			dev_err(&pdev->dev, "Timeout waiting "
+				"for Controller Initialisation\n");
+			goto Failure;
+		}
+		if (!DAC960_V1_EnableMemoryMailboxInterface(c)) {
+			dev_err(&pdev->dev,
+				"Unable to Enable Memory Mailbox Interface\n");
+			DAC960_LA_ControllerReset(base);
+			goto Failure;
+		}
+		DAC960_LA_EnableInterrupts(base);
+		c->V1.QueueCommand = DAC960_V1_QueueCommand;
+		c->V1.WriteCommandMailbox = DAC960_LA_WriteCommandMailbox;
+		if (c->V1.DualModeMemoryMailboxInterface)
+			c->V1.MailboxNewCommand =
+				DAC960_LA_MemoryMailboxNewCommand;
+		else
+			c->V1.MailboxNewCommand =
+				DAC960_LA_HardwareMailboxNewCommand;
+		c->ReadControllerConfiguration =
+			DAC960_V1_ReadControllerConfiguration;
+		c->DisableInterrupts = DAC960_LA_DisableInterrupts;
+		c->Reset = DAC960_LA_ControllerReset;
+		break;
+	case DAC960_PG_Controller:
+		DAC960_PG_DisableInterrupts(base);
+		DAC960_PG_AcknowledgeHardwareMailboxStatus(base);
+		udelay(1000);
+		while (DAC960_PG_InitializationInProgressP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			if (DAC960_PG_ReadErrorStatus(base, &ErrorStatus,
+						      &Parameter0, &Parameter1) &&
+			    DAC960_ReportErrorStatus(c, ErrorStatus,
+						     Parameter0, Parameter1))
+				goto Failure;
+			udelay(10);
+			timeout++;
+		}
+		if (timeout == DAC960_MAILBOX_TIMEOUT) {
+			dev_err(&pdev->dev, "Timeout waiting "
+				"for Controller Initialisation\n");
+			goto Failure;
+		}
+		if (!DAC960_V1_EnableMemoryMailboxInterface(c)) {
+			dev_err(&pdev->dev,
+				"Unable to Enable Memory Mailbox Interface\n");
+			DAC960_PG_ControllerReset(base);
+			goto Failure;
+		}
+		DAC960_PG_EnableInterrupts(base);
+		c->V1.QueueCommand = DAC960_V1_QueueCommand;
+		c->V1.WriteCommandMailbox = DAC960_PG_WriteCommandMailbox;
+		if (c->V1.DualModeMemoryMailboxInterface)
+			c->V1.MailboxNewCommand =
+				DAC960_PG_MemoryMailboxNewCommand;
+		else
+			c->V1.MailboxNewCommand =
+				DAC960_PG_HardwareMailboxNewCommand;
+		c->ReadControllerConfiguration =
+			DAC960_V1_ReadControllerConfiguration;
+		c->DisableInterrupts = DAC960_PG_DisableInterrupts;
+		c->Reset = DAC960_PG_ControllerReset;
+		break;
+	case DAC960_PD_Controller:
+		/* Legacy I/O-port controller: claim its port range */
+		if (!request_region(c->IO_Address, 0x80,
+				    c->FullModelName)) {
+			dev_err(&pdev->dev,
+				"IO port 0x%lx busy\n",
+				(unsigned long)c->IO_Address);
+			goto Failure;
+		}
+		DAC960_PD_DisableInterrupts(base);
+		DAC960_PD_AcknowledgeStatus(base);
+		udelay(1000);
+		while (DAC960_PD_InitializationInProgressP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			if (DAC960_PD_ReadErrorStatus(base, &ErrorStatus,
+						      &Parameter0, &Parameter1) &&
+			    DAC960_ReportErrorStatus(c, ErrorStatus,
+						     Parameter0, Parameter1))
+				goto Failure;
+			udelay(10);
+			timeout++;
+		}
+		if (timeout == DAC960_MAILBOX_TIMEOUT) {
+			dev_err(&pdev->dev, "Timeout waiting "
+				"for Controller Initialisation\n");
+			goto Failure;
+		}
+		if (!DAC960_V1_EnableMemoryMailboxInterface(c)) {
+			dev_err(&pdev->dev,
+				"Unable to Enable Memory Mailbox Interface\n");
+			DAC960_PD_ControllerReset(base);
+			goto Failure;
+		}
+		DAC960_PD_EnableInterrupts(base);
+		c->V1.QueueCommand = DAC960_PD_QueueCommand;
+		c->ReadControllerConfiguration =
+			DAC960_V1_ReadControllerConfiguration;
+		c->DisableInterrupts = DAC960_PD_DisableInterrupts;
+		c->Reset = DAC960_PD_ControllerReset;
+		break;
+	case DAC960_P_Controller:
+		/* P shares the PD register interface but queues differently */
+		if (!request_region(c->IO_Address, 0x80,
+				    c->FullModelName)){
+			dev_err(&pdev->dev,
+				"IO port 0x%lx busy\n",
+				(unsigned long)c->IO_Address);
+			goto Failure;
+		}
+		DAC960_PD_DisableInterrupts(base);
+		DAC960_PD_AcknowledgeStatus(base);
+		udelay(1000);
+		while (DAC960_PD_InitializationInProgressP(base) &&
+		       timeout < DAC960_MAILBOX_TIMEOUT) {
+			if (DAC960_PD_ReadErrorStatus(base, &ErrorStatus,
+						      &Parameter0, &Parameter1) &&
+			    DAC960_ReportErrorStatus(c, ErrorStatus,
+						     Parameter0, Parameter1))
+				goto Failure;
+			udelay(10);
+			timeout++;
+		}
+		if (timeout == DAC960_MAILBOX_TIMEOUT) {
+			dev_err(&pdev->dev,
+				"Timeout waiting for Controller "
+				"Initialisation\n");
+			goto Failure;
+		}
+		if (!DAC960_V1_EnableMemoryMailboxInterface(c)) {
+			dev_err(&pdev->dev,
+				"Unable to allocate DMA mapped memory\n");
+			DAC960_PD_ControllerReset(base);
+			goto Failure;
+		}
+		DAC960_PD_EnableInterrupts(base);
+		c->V1.QueueCommand = DAC960_P_QueueCommand;
+		c->ReadControllerConfiguration =
+			DAC960_V1_ReadControllerConfiguration;
+		c->DisableInterrupts = DAC960_PD_DisableInterrupts;
+		c->Reset = DAC960_PD_ControllerReset;
+		break;
+	}
+	/*
+	  Acquire shared access to the IRQ Channel.
+	*/
+	if (request_irq(pdev->irq, InterruptHandler, IRQF_SHARED,
+			c->FullModelName, c) < 0) {
+		dev_err(&pdev->dev,
+			"Unable to acquire IRQ Channel %d\n", pdev->irq);
+		goto Failure;
+	}
+	c->IRQ_Channel = pdev->irq;
+	return c;
+
+Failure:
+	dev_err(&pdev->dev,
+		"Failed to initialize Controller\n");
+	/* NOTE(review): DAC960_DetectCleanup is assumed to release the
+	   workqueue, mapping and I/O region acquired above -- confirm. */
+	DAC960_DetectCleanup(c);
+	DAC960_ControllerCount--;
+	return NULL;
+}
+
+/*
+  DAC960_Probe is the PCI .probe callback: it detects the controller,
+  reads and reports its configuration, allocates the auxiliary
+  command structures, starts the monitoring work item and registers
+  the Scsi_Host.  Returns 0 on success or a negative errno.
+*/
+
+static int
+DAC960_Probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+	DAC960_Controller_T *c;
+	int ret;
+
+	c = DAC960_DetectController(dev, entry);
+	if (!c)
+		return -ENODEV;
+
+	ret = DAC960_ReadControllerConfiguration(c);
+	if (ret < 0) {
+		DAC960_DetectCleanup(c);
+		return ret;
+	}
+	DAC960_ReportControllerConfiguration(c);
+
+	if (!DAC960_CreateAuxiliaryStructures(c)) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	/*
+	  Initialize the Monitoring Timer.
+	*/
+	INIT_DELAYED_WORK(&c->monitor_work, DAC960_MonitoringWork);
+	queue_delayed_work(c->work_q, &c->monitor_work, 1);
+
+	ret = scsi_add_host(c->host, &dev->dev);
+	if (ret) {
+		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+		/* Monitoring was already running; stop it before teardown */
+		cancel_delayed_work_sync(&c->monitor_work);
+		DAC960_DestroyAuxiliaryStructures(c);
+		goto failed;
+	}
+	scsi_scan_host(c->host);
+	return 0;
+failed:
+	DAC960_DetectCleanup(c);
+	return ret;
+}
+
+
+/*
+  DAC960_Remove is the PCI .remove callback: it stops monitoring,
+  flushes the controller cache and tears the controller down.
+  (The old comment referred to DAC960_Finalize, a leftover name from
+  the original DAC960 driver.)
+*/
+
+static void DAC960_Remove(struct pci_dev *pdev)
+{
+	DAC960_Controller_T *c = pci_get_drvdata(pdev);
+
+	if (c == NULL)
+		return;
+
+	cancel_delayed_work_sync(&c->monitor_work);
+	if (c->FirmwareType == DAC960_V1_Controller) {
+		shost_printk(KERN_NOTICE, c->host, "Flushing Cache...");
+		DAC960_V1_ExecuteType3(c, DAC960_V1_Flush, 0);
+	} else {
+		shost_printk(KERN_NOTICE, c->host, "Flushing Cache...");
+		/* NOTE(review): carried over from the original driver --
+		   V2 firmware presumably flushes its cache as part of the
+		   pause-device operation; confirm. */
+		DAC960_V2_DeviceOperation(c, DAC960_V2_PauseDevice,
+					  DAC960_V2_RAID_Controller);
+	}
+	DAC960_DestroyAuxiliaryStructures(c);
+	DAC960_DetectCleanup(c);
+}
+
+
+/*
+  DAC960_V1_HandleSCSI performs completion processing for Command
+  for DAC960 V1 Firmware Controllers: it unmaps DMA, releases the
+  per-command DCDB and scatter/gather allocations, translates the
+  firmware status into a SCSI result (building sense data where
+  appropriate) and completes the command.
+*/
+
+static void DAC960_V1_HandleSCSI(DAC960_Controller_T *c,
+				 DAC960_V1_CommandBlock_T *cmd_blk,
+				 struct scsi_cmnd *scmd)
+{
+	unsigned short status;
+
+	if (!cmd_blk)
+		return;
+
+	BUG_ON(!scmd);
+	scsi_dma_unmap(scmd);
+
+	if (cmd_blk->DCDB) {
+		memcpy(scmd->sense_buffer, &cmd_blk->DCDB->SenseData, 64);
+		pci_pool_free(c->V1.DCDBPool, cmd_blk->DCDB,
+			      cmd_blk->DCDB_dma);
+		cmd_blk->DCDB = NULL;
+	}
+	if (cmd_blk->sgl) {
+		pci_pool_free(c->ScatterGatherPool, cmd_blk->sgl,
+			      cmd_blk->sgl_addr);
+		cmd_blk->sgl = NULL;
+		cmd_blk->sgl_addr = 0;
+	}
+	status = cmd_blk->status;
+	switch (status) {
+	case DAC960_V1_NormalCompletion:
+	case DAC960_V1_DeviceBusy:
+		scmd->result = (DID_OK << 16) | status;
+		break;
+	case DAC960_V1_BadDataEncountered:
+		dev_dbg(&scmd->device->sdev_gendev,
+			"Bad Data Encountered\n");
+		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+			/* Unrecovered read error */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x11, 0);
+		else
+			/* Write error */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x0C, 0);
+		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+		break;
+	case DAC960_V1_IrrecoverableDataError:
+		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
+		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+			/* Unrecovered read error, auto-reallocation failed */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x11, 0x04);
+		else
+			/* Write error, auto-reallocation failed */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x0C, 0x02);
+		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+		break;
+	case DAC960_V1_LogicalDriveNonexistentOrOffline:
+		dev_dbg(&scmd->device->sdev_gendev,
+			"Logical Drive Nonexistent or Offline");
+		scmd->result = (DID_BAD_TARGET << 16);
+		break;
+	case DAC960_V1_AccessBeyondEndOfLogicalDrive:
+		dev_dbg(&scmd->device->sdev_gendev,
+			"Attempt to Access Beyond End of Logical Drive");
+		/* Logical block address out of range */
+		scsi_build_sense_buffer(0, scmd->sense_buffer,
+					NOT_READY, 0x21, 0);
+		/*
+		 * Fix: flag CHECK CONDITION so the midlayer actually
+		 * examines the sense data built above; the other
+		 * sense-building branches already do this.
+		 */
+		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+		break;
+	case DAC960_V1_DeviceNonresponsive:
+		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
+		scmd->result = (DID_BAD_TARGET << 16);
+		break;
+	default:
+		scmd_printk(KERN_ERR, scmd,
+			    "Unexpected Error Status %04X", status);
+		scmd->result = (DID_ERROR << 16);
+		break;
+	}
+	scmd->scsi_done(scmd);
+}
+
+/*
+ * DAC960_V1_HandleCommandBlock - complete an internal (non-SCSI)
+ * command block by waking any waiter.
+ */
+static void DAC960_V1_HandleCommandBlock(DAC960_Controller_T *c,
+					 DAC960_V1_CommandBlock_T *cmd_blk)
+{
+	struct completion *waiting;
+
+	if (!cmd_blk)
+		return;
+
+	/*
+	 * Fix: clear the pointer before calling complete().  Once the
+	 * waiter is woken it may reuse or free the command block, so
+	 * cmd_blk must not be written afterwards (the original stored
+	 * NULL after complete()).
+	 */
+	waiting = cmd_blk->Completion;
+	if (waiting) {
+		cmd_blk->Completion = NULL;
+		complete(waiting);
+	}
+}
+
+
+/*
+  Controller event table used by DAC960_V2_ReportEvent.  Each message
+  starts with a one-character class tag (consumed by the reporting
+  code; the printable text begins at offset 2).  Some enclosure
+  messages contain a %d placeholder for the unit number.  The table
+  is terminated by the { 0, "" } sentinel.
+*/
+
+static struct {
+	int EventCode;
+	unsigned char *EventMessage;
+} EventList[] =
+{ /* Physical Device Events (0x0000 - 0x007F) */
+	{ 0x0001, "P Online" },
+	{ 0x0002, "P Standby" },
+	{ 0x0005, "P Automatic Rebuild Started" },
+	{ 0x0006, "P Manual Rebuild Started" },
+	{ 0x0007, "P Rebuild Completed" },
+	{ 0x0008, "P Rebuild Cancelled" },
+	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
+	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
+	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
+	{ 0x000C, "S Offline" },
+	{ 0x000D, "P Found" },
+	{ 0x000E, "P Removed" },
+	{ 0x000F, "P Unconfigured" },
+	{ 0x0010, "P Expand Capacity Started" },
+	{ 0x0011, "P Expand Capacity Completed" },
+	{ 0x0012, "P Expand Capacity Failed" },
+	{ 0x0013, "P Command Timed Out" },
+	{ 0x0014, "P Command Aborted" },
+	{ 0x0015, "P Command Retried" },
+	{ 0x0016, "P Parity Error" },
+	{ 0x0017, "P Soft Error" },
+	{ 0x0018, "P Miscellaneous Error" },
+	{ 0x0019, "P Reset" },
+	{ 0x001A, "P Active Spare Found" },
+	{ 0x001B, "P Warm Spare Found" },
+	{ 0x001C, "S Sense Data Received" },
+	{ 0x001D, "P Initialization Started" },
+	{ 0x001E, "P Initialization Completed" },
+	{ 0x001F, "P Initialization Failed" },
+	{ 0x0020, "P Initialization Cancelled" },
+	{ 0x0021, "P Failed because Write Recovery Failed" },
+	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
+	{ 0x0023, "P Failed because of Double Check Condition" },
+	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
+	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
+	{ 0x0026, "P Failed because of Bad Tag from Device" },
+	{ 0x0027, "P Failed because of Command Timeout" },
+	{ 0x0028, "P Failed because of System Reset" },
+	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
+	{ 0x002A, "P Failed because Host Set Device to Failed State" },
+	{ 0x002B, "P Failed because of Selection Timeout" },
+	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
+	{ 0x002D, "P Failed because Device Returned Unknown Status" },
+	{ 0x002E, "P Failed because Device Not Ready" },
+	{ 0x002F, "P Failed because Device Not Found at Startup" },
+	{ 0x0030, "P Failed because COD Write Operation Failed" },
+	{ 0x0031, "P Failed because BDT Write Operation Failed" },
+	{ 0x0039, "P Missing at Startup" },
+	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
+	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
+	{ 0x003D, "P Standby Rebuild Started" },
+	/* Logical Device Events (0x0080 - 0x00FF) */
+	{ 0x0080, "M Consistency Check Started" },
+	{ 0x0081, "M Consistency Check Completed" },
+	{ 0x0082, "M Consistency Check Cancelled" },
+	{ 0x0083, "M Consistency Check Completed With Errors" },
+	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
+	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
+	{ 0x0086, "L Offline" },
+	{ 0x0087, "L Critical" },
+	{ 0x0088, "L Online" },
+	{ 0x0089, "M Automatic Rebuild Started" },
+	{ 0x008A, "M Manual Rebuild Started" },
+	{ 0x008B, "M Rebuild Completed" },
+	{ 0x008C, "M Rebuild Cancelled" },
+	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
+	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
+	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
+	{ 0x0090, "M Initialization Started" },
+	{ 0x0091, "M Initialization Completed" },
+	{ 0x0092, "M Initialization Cancelled" },
+	{ 0x0093, "M Initialization Failed" },
+	{ 0x0094, "L Found" },
+	{ 0x0095, "L Deleted" },
+	{ 0x0096, "M Expand Capacity Started" },
+	{ 0x0097, "M Expand Capacity Completed" },
+	{ 0x0098, "M Expand Capacity Failed" },
+	{ 0x0099, "L Bad Block Found" },
+	{ 0x009A, "L Size Changed" },
+	{ 0x009B, "L Type Changed" },
+	{ 0x009C, "L Bad Data Block Found" },
+	{ 0x009E, "L Read of Data Block in BDT" },
+	{ 0x009F, "L Write Back Data for Disk Block Lost" },
+	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
+	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
+	{ 0x00A2, "L Standby Rebuild Started" },
+	/* Fault Management Events (0x0100 - 0x017F) */
+	{ 0x0140, "E Fan %d Failed" },
+	{ 0x0141, "E Fan %d OK" },
+	{ 0x0142, "E Fan %d Not Present" },
+	{ 0x0143, "E Power Supply %d Failed" },
+	{ 0x0144, "E Power Supply %d OK" },
+	{ 0x0145, "E Power Supply %d Not Present" },
+	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
+	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
+	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
+	{ 0x0149, "E Temperature Sensor %d Not Present" },
+	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
+	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
+	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
+	/* Controller Events (0x0180 - 0x01FF) */
+	{ 0x0181, "C Cache Write Back Error" },
+	{ 0x0188, "C Battery Backup Unit Found" },
+	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
+	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
+	{ 0x0193, "C Installation Aborted" },
+	{ 0x0195, "C Battery Backup Unit Physically Removed" },
+	{ 0x0196, "C Memory Error During Warm Boot" },
+	{ 0x019E, "C Memory Soft ECC Error Corrected" },
+	{ 0x019F, "C Memory Hard ECC Error Corrected" },
+	{ 0x01A2, "C Battery Backup Unit Failed" },
+	{ 0x01AB, "C Mirror Race Recovery Failed" },
+	{ 0x01AC, "C Mirror Race on Critical Drive" },
+	/* Controller Internal Processor Events */
+	{ 0x0380, "C Internal Controller Hung" },
+	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
+	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
+	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
+	{ 0, "" }
+};
+
+static void DAC960_V2_ReportEvent(DAC960_Controller_T *c,
+ DAC960_V2_Event_T *Event)
+{
+ unsigned char MessageBuffer[DAC960_LineBufferSize];
+ int EventListIndex = 0, EventCode;
+ unsigned char EventType, *EventMessage;
+ struct scsi_device *sdev;
+ struct scsi_sense_hdr sshdr;
+ unsigned char *sense_info;
+ unsigned char *cmd_specific;
+
+ if (Event->EventCode == 0x1C) {
+ if (!scsi_normalize_sense(Event->RequestSenseData,
+ 40, &sshdr))
+ memset(&sshdr, 0x0, sizeof(sshdr));
+ else {
+ sense_info = &Event->RequestSenseData[3];
+ cmd_specific = &Event->RequestSenseData[7];
+ }
+ }
+ if (sshdr.sense_key == VENDOR_SPECIFIC &&
+ (sshdr.asc == 0x80 || sshdr.asc == 0x81))
+ Event->EventCode = ((sshdr.asc - 0x80) << 8 || sshdr.ascq);
+ while (true) {
+ EventCode = EventList[EventListIndex].EventCode;
+ if (EventCode == Event->EventCode || EventCode == 0)
+ break;
+ EventListIndex++;
+ }
+ EventType = EventList[EventListIndex].EventMessage[0];
+ EventMessage = &EventList[EventListIndex].EventMessage[2];
+ if (EventCode == 0) {
+ shost_printk(KERN_WARNING, c->host,
+ "Unknown Controller Event Code %04X\n",
+ Event->EventCode);
+ return;
+ }
+ switch (EventType) {
+ case 'P':
+ sdev = scsi_device_lookup(c->host, Event->Channel,
+ Event->TargetID, 0);
+ sdev_printk(KERN_INFO, sdev, "%s\n", EventMessage);
+ if (sdev && sdev->hostdata &&
+ sdev->channel < c->PhysicalChannelCount) {
+ if (c->FirmwareType == DAC960_V2_Controller) {
+ DAC960_V2_PhysicalDeviceInfo_T *pdev_info =
+ sdev->hostdata;
+ switch (Event->EventCode) {
+ case 0x0001:
+ case 0x0007:
+ pdev_info->State =
+ DAC960_V2_Device_Online;
+ break;
+ case 0x0002:
+ pdev_info->State =
+ DAC960_V2_Device_Standby;
+ break;
+ case 0x000C:
+ pdev_info->State =
+ DAC960_V2_Device_Offline;
+ break;
+ case 0x000E:
+ pdev_info->State =
+ DAC960_V2_Device_Missing;
+ break;
+ case 0x000F:
+ pdev_info->State =
+ DAC960_V2_Device_Unconfigured;
+ break;
+ }
+ }
+ }
+ break;
+ case 'L':
+ shost_printk(KERN_INFO, c->host, "Logical Drive %d %s\n",
+ Event->LogicalUnit, EventMessage);
+ c->V2.NeedControllerInformation = true;
+ break;
+ case 'M':
+ shost_printk(KERN_INFO, c->host, "Logical Drive %d %s\n",
+ Event->LogicalUnit, EventMessage);
+ c->V2.NeedControllerInformation = true;
+ break;
+ case 'S':
+ if (sshdr.sense_key == NO_SENSE ||
+ (sshdr.sense_key == NOT_READY &&
+ sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
+ sshdr.ascq == 0x02)))
+ break;
+ shost_printk(KERN_INFO, c->host, "Physical Device %d:%d %s\n",
+ Event->Channel, Event->TargetID, EventMessage);
+ shost_printk(KERN_INFO, c->host,
+ "Physical Device %d:%d Request Sense: "
+ "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+ Event->Channel, Event->TargetID,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ shost_printk(KERN_INFO, c->host,
+ "Physical Device %d:%d Request Sense: "
+ "Information = %02X%02X%02X%02X "
+ "%02X%02X%02X%02X\n",
+ Event->Channel, Event->TargetID,
+ sense_info[0], sense_info[1],
+ sense_info[2], sense_info[3],
+ cmd_specific[0], cmd_specific[1],
+ cmd_specific[2], cmd_specific[3]);
+ break;
+ case 'E':
+ if (c->SuppressEnclosureMessages)
+ break;
+ sprintf(MessageBuffer, EventMessage, Event->LogicalUnit);
+ shost_printk(KERN_INFO, c->host, "Enclosure %d %s\n",
+ Event->TargetID, MessageBuffer);
+ break;
+ case 'C':
+ shost_printk(KERN_INFO, c->host, "Controller %s\n", EventMessage);
+ break;
+ default:
+ shost_printk(KERN_INFO, c->host, "Unknown Controller Event Code %04X\n",
+ Event->EventCode);
+ break;
+ }
+}
+
+
+/*
+ DAC960_V2_ProcessCompletedCommand performs completion processing for Command
+ for DAC960 V2 Firmware Controllers.
+*/
+
+static void DAC960_V2_HandleSCSI(DAC960_Controller_T *c,
+ DAC960_V2_CommandBlock_T *cmd_blk,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char status;
+
+ if (!cmd_blk)
+ return;
+
+ BUG_ON(!scmd);
+ scsi_dma_unmap(scmd);
+
+ if (cmd_blk->sense) {
+ if (status == DAC960_V2_AbnormalCompletion &&
+ cmd_blk->sense_len) {
+ unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ if (sense_len > cmd_blk->sense_len)
+ sense_len = cmd_blk->sense_len;
+ memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
+ }
+ pci_pool_free(c->V2.RequestSensePool, cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ }
+ if (cmd_blk->DCDB) {
+ pci_pool_free(c->V2.DCDBPool, cmd_blk->DCDB,
+ cmd_blk->DCDB_dma);
+ cmd_blk->DCDB = NULL;
+ cmd_blk->DCDB_dma = 0;
+ }
+ if (cmd_blk->sgl) {
+ pci_pool_free(c->ScatterGatherPool, cmd_blk->sgl,
+ cmd_blk->sgl_addr);
+ cmd_blk->sgl = NULL;
+ cmd_blk->sgl_addr = 0;
+ }
+ if (cmd_blk->residual)
+ scsi_set_resid(scmd, cmd_blk->residual);
+ status = cmd_blk->status;
+ if (status == DAC960_V2_DeviceNonresponsive ||
+ status == DAC960_V2_DeviceNonresponsive2)
+ scmd->result = (DID_BAD_TARGET << 16);
+ else
+ scmd->result = (DID_OK << 16) || status;
+ scmd->scsi_done(scmd);
+}
+
+static void DAC960_V2_HandleCommandBlock(DAC960_Controller_T *c,
+ DAC960_V2_CommandBlock_T *cmd_blk)
+{
+ if (!cmd_blk)
+ return;
+
+ if (cmd_blk->Completion) {
+ complete(cmd_blk->Completion);
+ cmd_blk->Completion = NULL;
+ }
+}
+
+/*
+ DAC960_GEM_InterruptHandler handles hardware interrupts from DAC960 GEM Series
+ Controllers.
+*/
+
+static irqreturn_t DAC960_GEM_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier)
+{
+ DAC960_Controller_T *c = DeviceIdentifier;
+ void __iomem *base = c->BaseAddress;
+ DAC960_V2_StatusMailbox_T *NextStatusMailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->queue_lock, flags);
+ DAC960_GEM_AcknowledgeInterrupt(base);
+ NextStatusMailbox = c->V2.NextStatusMailbox;
+ while (NextStatusMailbox->id > 0) {
+ unsigned short id = NextStatusMailbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ DAC960_V2_CommandBlock_T *cmd_blk = NULL;
+
+ if (id == DAC960_DirectCommandIdentifier)
+ cmd_blk = &c->V2.DirectCommandBlock;
+ else if (id == DAC960_MonitoringIdentifier)
+ cmd_blk = &c->V2.MonitoringCommandBlock;
+ else {
+ scmd = scsi_host_find_tag(c->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = NextStatusMailbox->status;
+ cmd_blk->sense_len = NextStatusMailbox->sense_len;
+ cmd_blk->residual = NextStatusMailbox->residual;
+ } else
+ dev_err(&c->PCIDevice->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(NextStatusMailbox, 0, sizeof(DAC960_V2_StatusMailbox_T));
+ if (++NextStatusMailbox > c->V2.LastStatusMailbox)
+ NextStatusMailbox = c->V2.FirstStatusMailbox;
+
+ if (id < 3)
+ DAC960_V2_HandleCommandBlock(c, cmd_blk);
+ else
+ DAC960_V2_HandleSCSI(c, cmd_blk, scmd);
+ }
+ c->V2.NextStatusMailbox = NextStatusMailbox;
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ DAC960_BA_InterruptHandler handles hardware interrupts from DAC960 BA Series
+ Controllers.
+*/
+
+static irqreturn_t DAC960_BA_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier)
+{
+ DAC960_Controller_T *c = DeviceIdentifier;
+ void __iomem *base = c->BaseAddress;
+ DAC960_V2_StatusMailbox_T *NextStatusMailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->queue_lock, flags);
+ DAC960_BA_AcknowledgeInterrupt(base);
+ NextStatusMailbox = c->V2.NextStatusMailbox;
+ while (NextStatusMailbox->id > 0) {
+ unsigned short id = NextStatusMailbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ DAC960_V2_CommandBlock_T *cmd_blk = NULL;
+
+ if (id == DAC960_DirectCommandIdentifier)
+ cmd_blk = &c->V2.DirectCommandBlock;
+ else if (id == DAC960_MonitoringIdentifier)
+ cmd_blk = &c->V2.MonitoringCommandBlock;
+ else {
+ scmd = scsi_host_find_tag(c->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = NextStatusMailbox->status;
+ cmd_blk->sense_len = NextStatusMailbox->sense_len;
+ cmd_blk->residual = NextStatusMailbox->residual;
+ } else
+ dev_err(&c->PCIDevice->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(NextStatusMailbox, 0, sizeof(DAC960_V2_StatusMailbox_T));
+ if (++NextStatusMailbox > c->V2.LastStatusMailbox)
+ NextStatusMailbox = c->V2.FirstStatusMailbox;
+
+ if (id < 3)
+ DAC960_V2_HandleCommandBlock(c, cmd_blk);
+ else
+ DAC960_V2_HandleSCSI(c, cmd_blk, scmd);
+ }
+ c->V2.NextStatusMailbox = NextStatusMailbox;
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ DAC960_LP_InterruptHandler handles hardware interrupts from DAC960 LP Series
+ Controllers.
+*/
+
+static irqreturn_t DAC960_LP_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier)
+{
+ DAC960_Controller_T *c = DeviceIdentifier;
+ void __iomem *base = c->BaseAddress;
+ DAC960_V2_StatusMailbox_T *NextStatusMailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->queue_lock, flags);
+ DAC960_LP_AcknowledgeInterrupt(base);
+ NextStatusMailbox = c->V2.NextStatusMailbox;
+ while (NextStatusMailbox->id > 0) {
+ unsigned short id = NextStatusMailbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ DAC960_V2_CommandBlock_T *cmd_blk = NULL;
+
+ if (id == DAC960_DirectCommandIdentifier)
+ cmd_blk = &c->V2.DirectCommandBlock;
+ else if (id == DAC960_MonitoringIdentifier)
+ cmd_blk = &c->V2.MonitoringCommandBlock;
+ else {
+ scmd = scsi_host_find_tag(c->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = NextStatusMailbox->status;
+ cmd_blk->sense_len = NextStatusMailbox->sense_len;
+ cmd_blk->residual = NextStatusMailbox->residual;
+ } else
+ dev_err(&c->PCIDevice->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(NextStatusMailbox, 0, sizeof(DAC960_V2_StatusMailbox_T));
+ if (++NextStatusMailbox > c->V2.LastStatusMailbox)
+ NextStatusMailbox = c->V2.FirstStatusMailbox;
+
+ if (id < 3)
+ DAC960_V2_HandleCommandBlock(c, cmd_blk);
+ else
+ DAC960_V2_HandleSCSI(c, cmd_blk, scmd);
+ }
+ c->V2.NextStatusMailbox = NextStatusMailbox;
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ DAC960_LA_InterruptHandler handles hardware interrupts from DAC960 LA Series
+ Controllers.
+*/
+
+static irqreturn_t DAC960_LA_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier)
+{
+ DAC960_Controller_T *c = DeviceIdentifier;
+ void __iomem *base = c->BaseAddress;
+ DAC960_V1_StatusMailbox_T *NextStatusMailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->queue_lock, flags);
+ DAC960_LA_AcknowledgeInterrupt(base);
+ NextStatusMailbox = c->V1.NextStatusMailbox;
+ while (NextStatusMailbox->valid) {
+ unsigned char id = NextStatusMailbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ DAC960_V1_CommandBlock_T *cmd_blk = NULL;
+
+ if (id == DAC960_DirectCommandIdentifier)
+ cmd_blk = &c->V1.DirectCommandBlock;
+ else if (id == DAC960_MonitoringIdentifier)
+ cmd_blk = &c->V1.MonitoringCommandBlock;
+ else {
+ scmd = scsi_host_find_tag(c->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = NextStatusMailbox->status;
+ else
+ dev_err(&c->PCIDevice->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(NextStatusMailbox, 0, sizeof(DAC960_V1_StatusMailbox_T));
+ if (++NextStatusMailbox > c->V1.LastStatusMailbox)
+ NextStatusMailbox = c->V1.FirstStatusMailbox;
+
+ if (id < 3)
+ DAC960_V1_HandleCommandBlock(c, cmd_blk);
+ else
+ DAC960_V1_HandleSCSI(c, cmd_blk, scmd);
+ }
+ c->V1.NextStatusMailbox = NextStatusMailbox;
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ DAC960_PG_InterruptHandler handles hardware interrupts from DAC960 PG Series
+ Controllers.
+*/
+
+static irqreturn_t DAC960_PG_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier)
+{
+ DAC960_Controller_T *c = DeviceIdentifier;
+ void __iomem *base = c->BaseAddress;
+ DAC960_V1_StatusMailbox_T *NextStatusMailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->queue_lock, flags);
+ DAC960_PG_AcknowledgeInterrupt(base);
+ NextStatusMailbox = c->V1.NextStatusMailbox;
+ while (NextStatusMailbox->valid) {
+ unsigned char id = NextStatusMailbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ DAC960_V1_CommandBlock_T *cmd_blk = NULL;
+
+ if (id == DAC960_DirectCommandIdentifier)
+ cmd_blk = &c->V1.DirectCommandBlock;
+ else if (id == DAC960_MonitoringIdentifier)
+ cmd_blk = &c->V1.MonitoringCommandBlock;
+ else {
+ scmd = scsi_host_find_tag(c->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = NextStatusMailbox->status;
+ else
+ dev_err(&c->PCIDevice->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(NextStatusMailbox, 0, sizeof(DAC960_V1_StatusMailbox_T));
+ if (++NextStatusMailbox > c->V1.LastStatusMailbox)
+ NextStatusMailbox = c->V1.FirstStatusMailbox;
+
+ if (id < 3)
+ DAC960_V1_HandleCommandBlock(c, cmd_blk);
+ else
+ DAC960_V1_HandleSCSI(c, cmd_blk, scmd);
+ }
+ c->V1.NextStatusMailbox = NextStatusMailbox;
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ DAC960_PD_InterruptHandler handles hardware interrupts from DAC960 PD Series
+ Controllers.
+*/
+
+static irqreturn_t DAC960_PD_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier)
+{
+ DAC960_Controller_T *c = DeviceIdentifier;
+ void __iomem *base = c->BaseAddress;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->queue_lock, flags);
+ while (DAC960_PD_StatusAvailableP(base)) {
+ unsigned char id = DAC960_PD_ReadStatusCommandIdentifier(base);
+ struct scsi_cmnd *scmd = NULL;
+ DAC960_V1_CommandBlock_T *cmd_blk;
+
+ if (id == DAC960_DirectCommandIdentifier)
+ cmd_blk = &c->V1.DirectCommandBlock;
+ else if (id == DAC960_MonitoringIdentifier)
+ cmd_blk = &c->V1.MonitoringCommandBlock;
+ else {
+ scmd = scsi_host_find_tag(c->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status =
+ DAC960_PD_ReadStatusRegister(base);
+ else
+ dev_err(&c->PCIDevice->dev,
+ "Unhandled command completion %d\n", id);
+
+ DAC960_PD_AcknowledgeInterrupt(base);
+ DAC960_PD_AcknowledgeStatus(base);
+
+ if (id < 3)
+ DAC960_V1_HandleCommandBlock(c, cmd_blk);
+ else
+ DAC960_V1_HandleSCSI(c, cmd_blk, scmd);
+ }
+ spin_unlock_irqrestore(&c->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ DAC960_P_InterruptHandler handles hardware interrupts from DAC960 P Series
+ Controllers.
+
+ Translations of DAC960_V1_Enquiry and DAC960_V1_GetDeviceState rely
+ on the data having been placed into DAC960_Controller_T, rather than
+ an arbitrary buffer.
+*/
+
static irqreturn_t DAC960_P_InterruptHandler(int IRQ_Channel,
					     void *DeviceIdentifier)
{
	DAC960_Controller_T *c = DeviceIdentifier;
	void __iomem *base = c->BaseAddress;
	unsigned long flags;

	spin_lock_irqsave(&c->queue_lock, flags);
	while (DAC960_PD_StatusAvailableP(base)) {
		unsigned char id = DAC960_PD_ReadStatusCommandIdentifier(base);
		struct scsi_cmnd *scmd = NULL;
		DAC960_V1_CommandBlock_T *cmd_blk = NULL;

		/* Identifiers below 3 are the driver's internal commands;
		 * anything else maps back to a SCSI host tag. */
		if (id == DAC960_DirectCommandIdentifier)
			cmd_blk = &c->V1.DirectCommandBlock;
		else if (id == DAC960_MonitoringIdentifier)
			cmd_blk = &c->V1.MonitoringCommandBlock;
		else {
			scmd = scsi_host_find_tag(c->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status
				= DAC960_PD_ReadStatusRegister(base);
		else
			dev_err(&c->PCIDevice->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_AcknowledgeInterrupt(base);
		DAC960_PD_AcknowledgeStatus(base);

		/* Unmatched completions are only logged above; no further
		 * processing is possible without a command block. */
		if (cmd_blk) {
			DAC960_V1_CommandMailbox_T *mbox;
			DAC960_V1_CommandOpcode_T op;

			/*
			 * Commands were issued with the old opcode variants;
			 * rewrite each completed command to its new-style
			 * opcode and translate the returned data in place
			 * (see the note above this function: translation
			 * relies on the data living in DAC960_Controller_T).
			 */
			mbox = &cmd_blk->mbox;
			op = mbox->Common.opcode;
			switch (op) {
			case DAC960_V1_Enquiry_Old:
				mbox->Common.opcode = DAC960_V1_Enquiry;
				DAC960_P_To_PD_TranslateEnquiry(c->V1.NewEnquiry);
				break;
			case DAC960_V1_GetDeviceState_Old:
				mbox->Common.opcode = DAC960_V1_GetDeviceState;
				DAC960_P_To_PD_TranslateDeviceState(c->V1.NewDeviceState);
				break;
			case DAC960_V1_Read_Old:
				mbox->Common.opcode = DAC960_V1_Read;
				DAC960_P_To_PD_TranslateReadWriteCommand(cmd_blk);
				break;
			case DAC960_V1_Write_Old:
				mbox->Common.opcode = DAC960_V1_Write;
				DAC960_P_To_PD_TranslateReadWriteCommand(cmd_blk);
				break;
			case DAC960_V1_ReadWithScatterGather_Old:
				mbox->Common.opcode = DAC960_V1_ReadWithScatterGather;
				DAC960_P_To_PD_TranslateReadWriteCommand(cmd_blk);
				break;
			case DAC960_V1_WriteWithScatterGather_Old:
				mbox->Common.opcode = DAC960_V1_WriteWithScatterGather;
				DAC960_P_To_PD_TranslateReadWriteCommand(cmd_blk);
				break;
			default:
				break;
			}
			if (id < 3)
				DAC960_V1_HandleCommandBlock(c, cmd_blk);
			else
				DAC960_V1_HandleSCSI(c, cmd_blk, scmd);
		}
	}
	spin_unlock_irqrestore(&c->queue_lock, flags);
	return IRQ_HANDLED;
}
+
+
+/*
+ DAC960_V2_MonitoringGetHealthStatus queues a Get Health Status Command
+ to DAC960 V2 Firmware Controllers.
+*/
+
+static unsigned char DAC960_V2_MonitoringGetHealthStatus(DAC960_Controller_T *c)
+{
+ DAC960_V2_CommandBlock_T *cmd_blk = &c->V2.MonitoringCommandBlock;
+ DAC960_V2_CommandMailbox_T *mbox = &cmd_blk->mbox;
+ DAC960_V2_DataTransferMemoryAddress_T *dma_addr;
+ unsigned char status = cmd_blk->status;
+
+ DAC960_V2_ClearCommand(cmd_blk);
+ mbox->Common.opcode = DAC960_V2_IOCTL;
+ mbox->Common.id = DAC960_MonitoringIdentifier;
+ mbox->Common.control.DataTransferControllerToHost = true;
+ mbox->Common.control.NoAutoRequestSense = true;
+ mbox->Common.dma_size = sizeof(DAC960_V2_HealthStatusBuffer_T);
+ mbox->Common.IOCTL_Opcode = DAC960_V2_GetHealthStatus;
+ dma_addr = &mbox->Common.dma_addr;
+ dma_addr->ScatterGatherSegments[0].SegmentDataPointer =
+ c->V2.HealthStatusBufferDMA;
+ dma_addr->ScatterGatherSegments[0].SegmentByteCount =
+ mbox->ControllerInfo.dma_size;
+ dev_dbg(&c->host->shost_gendev, "Sending GetHealthStatus\n");
+ DAC960_V2_ExecuteCommand(c, cmd_blk);
+ status = cmd_blk->status;
+
+ return status;
+}
+
+
+/*
+ DAC960_MonitoringTimerFunction is the timer function for monitoring
+ the status of DAC960 Controllers.
+*/
+
+static void DAC960_MonitoringWork(struct work_struct *work)
+{
+ DAC960_Controller_T *c =
+ container_of(work, DAC960_Controller_T, monitor_work.work);
+ unsigned long interval = DAC960_MonitoringTimerInterval;
+ unsigned char status;
+
+ dev_dbg(&c->host->shost_gendev, "monitor tick\n");
+ if (c->FirmwareType == DAC960_V1_Controller) {
+ if (c->V1.NewEventLogSequenceNumber
+ > c->V1.OldEventLogSequenceNumber) {
+ int event = c->V1.OldEventLogSequenceNumber;
+ dev_dbg(&c->host->shost_gendev,
+ "get event log no %d/%d\n",
+ c->V1.NewEventLogSequenceNumber, event);
+ DAC960_V1_MonitorGetEventLog(c, event);
+ c->V1.OldEventLogSequenceNumber = event + 1;
+ interval = 10;
+ } else if (c->V1.NeedErrorTableInformation) {
+ c->V1.NeedErrorTableInformation = false;
+ dev_dbg(&c->host->shost_gendev, "get error table\n");
+ DAC960_V1_MonitorGetErrorTable(c);
+ interval = 10;
+ } else if (c->V1.NeedRebuildProgress &&
+ c->V1.RebuildProgressFirst) {
+ c->V1.NeedRebuildProgress = false;
+ dev_dbg(&c->host->shost_gendev,
+ "get rebuild progress\n");
+ DAC960_V1_MonitorRebuildProgress(c);
+ interval = 10;
+ } else if (c->V1.NeedLogicalDeviceInfo) {
+ c->V1.NeedLogicalDeviceInfo = false;
+ dev_dbg(&c->host->shost_gendev,
+ "get logical drive info\n");
+ DAC960_V1_GetLogicalDriveInfo(c);
+ interval = 10;
+ } else if (c->V1.NeedRebuildProgress) {
+ c->V1.NeedRebuildProgress = false;
+ dev_dbg(&c->host->shost_gendev,
+ "get rebuild progress\n");
+ DAC960_V1_MonitorRebuildProgress(c);
+ interval = 10;
+ } else if (c->V1.NeedConsistencyCheckProgress) {
+ c->V1.NeedConsistencyCheckProgress = false;
+ dev_dbg(&c->host->shost_gendev,
+ "get consistency check progress\n");
+ DAC960_V1_ConsistencyCheckProgress(c);
+ interval = 10;
+ } else if (c->V1.NeedBackgroundInitializationStatus) {
+ c->V1.NeedBackgroundInitializationStatus = false;
+ dev_dbg(&c->host->shost_gendev,
+ "get background init status\n");
+ DAC960_V1_BackgroundInitialization(c);
+ interval = 10;
+ } else {
+ dev_dbg(&c->host->shost_gendev, "new enquiry\n");
+ mutex_lock(&c->V1.dma_mutex);
+ DAC960_V1_NewEnquiry(c);
+ mutex_unlock(&c->V1.dma_mutex);
+ if ((c->V1.NewEventLogSequenceNumber
+ - c->V1.OldEventLogSequenceNumber > 0) ||
+ c->V1.NeedErrorTableInformation ||
+ c->V1.NeedRebuildProgress ||
+ c->V1.NeedLogicalDeviceInfo ||
+ c->V1.NeedRebuildProgress ||
+ c->V1.NeedConsistencyCheckProgress ||
+ c->V1.NeedBackgroundInitializationStatus)
+ dev_dbg(&c->host->shost_gendev,
+ "reschedule monitor\n");
+ }
+ } else {
+ DAC960_V2_ControllerInfo_T *info =
+ &c->V2.ControllerInformation;
+ unsigned int StatusChangeCounter =
+ c->V2.HealthStatusBuffer->StatusChangeCounter;
+
+ status = DAC960_V2_MonitoringGetHealthStatus(c);
+
+ if (c->V2.NeedControllerInformation) {
+ c->V2.NeedControllerInformation = false;
+ mutex_lock(&c->V2.cinfo_mutex);
+ status = DAC960_V2_NewControllerInfo(c);
+ mutex_unlock(&c->V2.cinfo_mutex);
+ }
+ if (c->V2.HealthStatusBuffer->NextEventSequenceNumber
+ - c->V2.NextEventSequenceNumber > 0) {
+ status = DAC960_V2_MonitorGetEvent(c);
+ if (status == DAC960_V2_NormalCompletion) {
+ DAC960_V2_ReportEvent(c, c->V2.Event);
+ c->V2.NextEventSequenceNumber++;
+ interval = 1;
+ }
+ }
+
+ if (time_after(jiffies, c->SecondaryMonitoringTime
+ + DAC960_SecondaryMonitoringInterval))
+ c->SecondaryMonitoringTime = jiffies;
+
+ if (info->BackgroundInitializationsActive +
+ info->LogicalDeviceInitializationsActive +
+ info->PhysicalDeviceInitializationsActive +
+ info->ConsistencyChecksActive +
+ info->RebuildsActive +
+ info->OnlineExpansionsActive != 0) {
+ struct scsi_device *sdev;
+ shost_for_each_device(sdev, c->host) {
+ DAC960_V2_LogicalDeviceInfo_T *ldev_info;
+ if (sdev->channel < c->PhysicalChannelCount)
+ continue;
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ continue;
+ status = DAC960_V2_NewLogicalDeviceInfo(c,
+ ldev_info->LogicalDeviceNumber,
+ ldev_info);
+ }
+ c->V2.NeedControllerInformation = true;
+ }
+ if (StatusChangeCounter == c->V2.StatusChangeCounter &&
+ c->V2.HealthStatusBuffer->NextEventSequenceNumber
+ == c->V2.NextEventSequenceNumber &&
+ (c->V2.NeedControllerInformation == false ||
+ time_before(jiffies, c->PrimaryMonitoringTime
+ + DAC960_MonitoringTimerInterval))) {
+ interval = DAC960_SecondaryMonitoringInterval;
+ }
+ }
+ if (interval > 1)
+ c->PrimaryMonitoringTime = jiffies;
+ queue_delayed_work(c->work_q, &c->monitor_work, interval);
+}
+
/*
 * Per-hardware-type probe data: hardware and firmware generation,
 * interrupt handler and register window size.  These are referenced
 * via the driver_data field of the PCI device ID table.
 */
static struct DAC960_privdata DAC960_GEM_privdata = {
	.HardwareType = DAC960_GEM_Controller,
	.FirmwareType = DAC960_V2_Controller,
	.InterruptHandler = DAC960_GEM_InterruptHandler,
	.MemoryWindowSize = DAC960_GEM_RegisterWindowSize,
};


static struct DAC960_privdata DAC960_BA_privdata = {
	.HardwareType = DAC960_BA_Controller,
	.FirmwareType = DAC960_V2_Controller,
	.InterruptHandler = DAC960_BA_InterruptHandler,
	.MemoryWindowSize = DAC960_BA_RegisterWindowSize,
};

static struct DAC960_privdata DAC960_LP_privdata = {
	.HardwareType = DAC960_LP_Controller,
	.FirmwareType = DAC960_V2_Controller,
	.InterruptHandler = DAC960_LP_InterruptHandler,
	.MemoryWindowSize = DAC960_LP_RegisterWindowSize,
};

static struct DAC960_privdata DAC960_LA_privdata = {
	.HardwareType = DAC960_LA_Controller,
	.FirmwareType = DAC960_V1_Controller,
	.InterruptHandler = DAC960_LA_InterruptHandler,
	.MemoryWindowSize = DAC960_LA_RegisterWindowSize,
};

static struct DAC960_privdata DAC960_PG_privdata = {
	.HardwareType = DAC960_PG_Controller,
	.FirmwareType = DAC960_V1_Controller,
	.InterruptHandler = DAC960_PG_InterruptHandler,
	.MemoryWindowSize = DAC960_PG_RegisterWindowSize,
};

static struct DAC960_privdata DAC960_PD_privdata = {
	.HardwareType = DAC960_PD_Controller,
	.FirmwareType = DAC960_V1_Controller,
	.InterruptHandler = DAC960_PD_InterruptHandler,
	.MemoryWindowSize = DAC960_PD_RegisterWindowSize,
};

/* P series shares the PD register window size but has its own
 * interrupt handler (old-opcode translation). */
static struct DAC960_privdata DAC960_P_privdata = {
	.HardwareType = DAC960_P_Controller,
	.FirmwareType = DAC960_V1_Controller,
	.InterruptHandler = DAC960_P_InterruptHandler,
	.MemoryWindowSize = DAC960_PD_RegisterWindowSize,
};
+
/* PCI IDs of all supported controllers; driver_data selects the
 * matching DAC960_privdata above. */
static const struct pci_device_id DAC960_id_table[] = {
	{
		.vendor = PCI_VENDOR_ID_MYLEX,
		.device = PCI_DEVICE_ID_MYLEX_DAC960_GEM,
		.subvendor = PCI_VENDOR_ID_MYLEX,
		.subdevice = PCI_ANY_ID,
		.driver_data = (unsigned long) &DAC960_GEM_privdata,
	},
	{
		.vendor = PCI_VENDOR_ID_MYLEX,
		.device = PCI_DEVICE_ID_MYLEX_DAC960_BA,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = (unsigned long) &DAC960_BA_privdata,
	},
	{
		.vendor = PCI_VENDOR_ID_MYLEX,
		.device = PCI_DEVICE_ID_MYLEX_DAC960_LP,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = (unsigned long) &DAC960_LP_privdata,
	},
	{
		/* LA is matched on DEC 21285 vendor/device IDs with a
		 * Mylex subsystem, not on a Mylex primary ID. */
		.vendor = PCI_VENDOR_ID_DEC,
		.device = PCI_DEVICE_ID_DEC_21285,
		.subvendor = PCI_VENDOR_ID_MYLEX,
		.subdevice = PCI_DEVICE_ID_MYLEX_DAC960_LA,
		.driver_data = (unsigned long) &DAC960_LA_privdata,
	},
	{
		.vendor = PCI_VENDOR_ID_MYLEX,
		.device = PCI_DEVICE_ID_MYLEX_DAC960_PG,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = (unsigned long) &DAC960_PG_privdata,
	},
	{
		.vendor = PCI_VENDOR_ID_MYLEX,
		.device = PCI_DEVICE_ID_MYLEX_DAC960_PD,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = (unsigned long) &DAC960_PD_privdata,
	},
	{
		.vendor = PCI_VENDOR_ID_MYLEX,
		.device = PCI_DEVICE_ID_MYLEX_DAC960_P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.driver_data = (unsigned long) &DAC960_P_privdata,
	},
	{0, },
};
+
MODULE_DEVICE_TABLE(pci, DAC960_id_table);

/* PCI driver glue; probe/remove are defined earlier in this file. */
static struct pci_driver DAC960_pci_driver = {
	.name = "DAC960",
	.id_table = DAC960_id_table,
	.probe = DAC960_Probe,
	.remove = DAC960_Remove,
};
+
+static int __init DAC960_init_module(void)
+{
+ int ret;
+
+ mylex_v1_raid_template = raid_class_attach(&mylex_v1_raid_functions);
+ if (!mylex_v1_raid_template)
+ return -ENODEV;
+ mylex_v2_raid_template = raid_class_attach(&mylex_v2_raid_functions);
+ if (!mylex_v2_raid_template) {
+ raid_class_release(mylex_v1_raid_template);
+ return -ENODEV;
+ }
+
+ ret = pci_register_driver(&DAC960_pci_driver);
+ if (ret) {
+ raid_class_release(mylex_v2_raid_template);
+ raid_class_release(mylex_v1_raid_template);
+ }
+ return ret;
+}
+
/* Module teardown: undo DAC960_init_module in reverse order. */
static void __exit DAC960_cleanup_module(void)
{
	pci_unregister_driver(&DAC960_pci_driver);
	raid_class_release(mylex_v2_raid_template);
	raid_class_release(mylex_v1_raid_template);
}
+
+module_init(DAC960_init_module);
+module_exit(DAC960_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
new file mode 100644
@@ -0,0 +1,4029 @@
+/*
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ *
+ * This program is free software; you may redistribute and/or modify it under
+ * the terms of the GNU General Public License Version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for complete details.
+ *
+ */
+
+#ifndef _MYLEX_H
+#define _MYLEX_H
+
+/*
+ Define the maximum number of DAC960 Controllers supported by this driver.
+*/
+
+#define DAC960_MaxControllers 8
+
+
+/*
+ Define the maximum number of Controller Channels supported by DAC960
+ V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_V1_MaxChannels 3
+#define DAC960_V2_MaxChannels 4
+
+
+/*
+ Define the maximum number of Targets per Channel supported by DAC960
+ V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_V1_MaxTargets 16
+#define DAC960_V2_MaxTargets 128
+
+
+/*
+ Define the maximum number of Logical Drives supported by DAC960
+ V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_MaxLogicalDrives 32
+
+
+/*
+ Define the maximum number of Physical Devices supported by DAC960
+ V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_V1_MaxPhysicalDevices 45
+#define DAC960_V2_MaxPhysicalDevices 272
+
+/*
+ dma_loaf is used by helper routines to divide a region of
+ dma mapped memory into smaller pieces, where those pieces
+ are not of uniform size.
+ */
+
+struct dma_loaf {
+ void *cpu_base;
+ dma_addr_t dma_base;
+ size_t length;
+ void *cpu_free;
+ dma_addr_t dma_free;
+};
+
+/*
+ Define the DAC960 V1 Firmware Command Opcodes.
+*/
+
+typedef enum
+{
+ /* I/O Commands */
+ DAC960_V1_ReadExtended = 0x33,
+ DAC960_V1_WriteExtended = 0x34,
+ DAC960_V1_ReadAheadExtended = 0x35,
+ DAC960_V1_ReadExtendedWithScatterGather = 0xB3,
+ DAC960_V1_WriteExtendedWithScatterGather = 0xB4,
+ DAC960_V1_Read = 0x36,
+ DAC960_V1_ReadWithScatterGather = 0xB6,
+ DAC960_V1_Write = 0x37,
+ DAC960_V1_WriteWithScatterGather = 0xB7,
+ DAC960_V1_DCDB = 0x04,
+ DAC960_V1_DCDBWithScatterGather = 0x84,
+ DAC960_V1_Flush = 0x0A,
+ /* Controller Status Related Commands */
+ DAC960_V1_Enquiry = 0x53,
+ DAC960_V1_Enquiry2 = 0x1C,
+ DAC960_V1_GetLogicalDriveElement = 0x55,
+ DAC960_V1_GetLogicalDeviceInfo = 0x19,
+ DAC960_V1_IOPortRead = 0x39,
+ DAC960_V1_IOPortWrite = 0x3A,
+ DAC960_V1_GetSDStats = 0x3E,
+ DAC960_V1_GetPDStats = 0x3F,
+ DAC960_V1_PerformEventLogOperation = 0x72,
+ /* Device Related Commands */
+ DAC960_V1_StartDevice = 0x10,
+ DAC960_V1_GetDeviceState = 0x50,
+ DAC960_V1_StopChannel = 0x13,
+ DAC960_V1_StartChannel = 0x12,
+ DAC960_V1_ResetChannel = 0x1A,
+ /* Commands Associated with Data Consistency and Errors */
+ DAC960_V1_Rebuild = 0x09,
+ DAC960_V1_RebuildAsync = 0x16,
+ DAC960_V1_CheckConsistency = 0x0F,
+ DAC960_V1_CheckConsistencyAsync = 0x1E,
+ DAC960_V1_RebuildStat = 0x0C,
+ DAC960_V1_GetRebuildProgress = 0x27,
+ DAC960_V1_RebuildControl = 0x1F,
+ DAC960_V1_ReadBadBlockTable = 0x0B,
+ DAC960_V1_ReadBadDataTable = 0x25,
+ DAC960_V1_ClearBadDataTable = 0x26,
+ DAC960_V1_GetErrorTable = 0x17,
+ DAC960_V1_AddCapacityAsync = 0x2A,
+ DAC960_V1_BackgroundInitializationControl = 0x2B,
+ /* Configuration Related Commands */
+ DAC960_V1_ReadConfig2 = 0x3D,
+ DAC960_V1_WriteConfig2 = 0x3C,
+ DAC960_V1_ReadConfigurationOnDisk = 0x4A,
+ DAC960_V1_WriteConfigurationOnDisk = 0x4B,
+ DAC960_V1_ReadConfiguration = 0x4E,
+ DAC960_V1_ReadBackupConfiguration = 0x4D,
+ DAC960_V1_WriteConfiguration = 0x4F,
+ DAC960_V1_AddConfiguration = 0x4C,
+ DAC960_V1_ReadConfigurationLabel = 0x48,
+ DAC960_V1_WriteConfigurationLabel = 0x49,
+ /* Firmware Upgrade Related Commands */
+ DAC960_V1_LoadImage = 0x20,
+ DAC960_V1_StoreImage = 0x21,
+ DAC960_V1_ProgramImage = 0x22,
+ /* Diagnostic Commands */
+ DAC960_V1_SetDiagnosticMode = 0x31,
+ DAC960_V1_RunDiagnostic = 0x32,
+ /* Subsystem Service Commands */
+ DAC960_V1_GetSubsystemData = 0x70,
+ DAC960_V1_SetSubsystemParameters = 0x71,
+ /* Version 2.xx Firmware Commands */
+ DAC960_V1_Enquiry_Old = 0x05,
+ DAC960_V1_GetDeviceState_Old = 0x14,
+ DAC960_V1_Read_Old = 0x02,
+ DAC960_V1_Write_Old = 0x03,
+ DAC960_V1_ReadWithScatterGather_Old = 0x82,
+ DAC960_V1_WriteWithScatterGather_Old = 0x83
+}
+__attribute__ ((packed))
+DAC960_V1_CommandOpcode_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Command Status Codes.
+*/
+
+#define DAC960_V1_NormalCompletion 0x0000 /* Common */
+#define DAC960_V1_CheckConditionReceived 0x0002 /* Common */
+#define DAC960_V1_NoDeviceAtAddress 0x0102 /* Common */
+#define DAC960_V1_InvalidDeviceAddress 0x0105 /* Common */
+#define DAC960_V1_InvalidParameter 0x0105 /* Common */
+#define DAC960_V1_IrrecoverableDataError 0x0001 /* I/O */
+#define DAC960_V1_LogicalDriveNonexistentOrOffline 0x0002 /* I/O */
+#define DAC960_V1_AccessBeyondEndOfLogicalDrive 0x0105 /* I/O */
+#define DAC960_V1_BadDataEncountered 0x010C /* I/O */
+#define DAC960_V1_DeviceBusy 0x0008 /* DCDB */
+#define DAC960_V1_DeviceNonresponsive 0x000E /* DCDB */
+#define DAC960_V1_CommandTerminatedAbnormally 0x000F /* DCDB */
+#define DAC960_V1_UnableToStartDevice 0x0002 /* Device */
+#define DAC960_V1_InvalidChannelOrTargetOrModifier 0x0105 /* Device */
+#define DAC960_V1_ChannelBusy 0x0106 /* Device */
+#define DAC960_V1_OutOfMemory 0x0107 /* Device */
+#define DAC960_V1_ChannelNotStopped 0x0002 /* Device */
+#define DAC960_V1_AttemptToRebuildOnlineDrive 0x0002 /* Consistency */
+#define DAC960_V1_RebuildBadBlocksEncountered 0x0003 /* Consistency */
+#define DAC960_V1_NewDiskFailedDuringRebuild 0x0004 /* Consistency */
+#define DAC960_V1_RebuildOrCheckAlreadyInProgress 0x0106 /* Consistency */
+#define DAC960_V1_DependentDiskIsDead 0x0002 /* Consistency */
+#define DAC960_V1_InconsistentBlocksFound 0x0003 /* Consistency */
+#define DAC960_V1_InvalidOrNonredundantLogicalDrive 0x0105 /* Consistency */
+#define DAC960_V1_NoRebuildOrCheckInProgress 0x0105 /* Consistency */
+#define DAC960_V1_RebuildInProgress_DataValid 0x0000 /* Consistency */
+#define DAC960_V1_RebuildFailed_LogicalDriveFailure 0x0002 /* Consistency */
+#define DAC960_V1_RebuildFailed_BadBlocksOnOther 0x0003 /* Consistency */
+#define DAC960_V1_RebuildFailed_NewDriveFailed 0x0004 /* Consistency */
+#define DAC960_V1_RebuildSuccessful 0x0100 /* Consistency */
+#define DAC960_V1_RebuildSuccessfullyTerminated 0x0107 /* Consistency */
+#define DAC960_V1_BackgroundInitSuccessful 0x0100 /* Consistency */
+#define DAC960_V1_BackgroundInitAborted 0x0005 /* Consistency */
+#define DAC960_V1_NoBackgroundInitInProgress 0x0105 /* Consistency */
+#define DAC960_V1_AddCapacityInProgress 0x0004 /* Consistency */
+#define DAC960_V1_AddCapacityFailedOrSuspended 0x00F4 /* Consistency */
+#define DAC960_V1_Config2ChecksumError 0x0002 /* Configuration */
+#define DAC960_V1_ConfigurationSuspended 0x0106 /* Configuration */
+#define DAC960_V1_FailedToConfigureNVRAM 0x0105 /* Configuration */
+#define DAC960_V1_ConfigurationNotSavedStateChange 0x0106 /* Configuration */
+#define DAC960_V1_SubsystemNotInstalled 0x0001 /* Subsystem */
+#define DAC960_V1_SubsystemFailed 0x0002 /* Subsystem */
+#define DAC960_V1_SubsystemBusy 0x0106 /* Subsystem */
+
+
+/*
+ Define the DAC960 V1 Firmware Enquiry Command reply structure.
+*/
+
+typedef struct DAC960_V1_Enquiry
+{
+ unsigned char NumberOfLogicalDrives; /* Byte 0 */
+ unsigned int :24; /* Bytes 1-3 */
+ unsigned int LogicalDriveSizes[32]; /* Bytes 4-131 */
+ unsigned short FlashAge; /* Bytes 132-133 */
+ struct {
+ bool DeferredWriteError:1; /* Byte 134 Bit 0 */
+ bool BatteryLow:1; /* Byte 134 Bit 1 */
+ unsigned char :6; /* Byte 134 Bits 2-7 */
+ } StatusFlags;
+ unsigned char :8; /* Byte 135 */
+ unsigned char MinorFirmwareVersion; /* Byte 136 */
+ unsigned char MajorFirmwareVersion; /* Byte 137 */
+ enum {
+ DAC960_V1_NoStandbyRebuildOrCheckInProgress = 0x00,
+ DAC960_V1_StandbyRebuildInProgress = 0x01,
+ DAC960_V1_BackgroundRebuildInProgress = 0x02,
+ DAC960_V1_BackgroundCheckInProgress = 0x03,
+ DAC960_V1_StandbyRebuildCompletedWithError = 0xFF,
+ DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed = 0xF0,
+ DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed = 0xF1,
+ DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses = 0xF2,
+ DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated = 0xF3
+ } __attribute__ ((packed)) RebuildFlag; /* Byte 138 */
+ unsigned char MaxCommands; /* Byte 139 */
+ unsigned char OfflineLogicalDriveCount; /* Byte 140 */
+ unsigned char :8; /* Byte 141 */
+ unsigned short EventLogSequenceNumber; /* Bytes 142-143 */
+ unsigned char CriticalLogicalDriveCount; /* Byte 144 */
+ unsigned int :24; /* Bytes 145-147 */
+ unsigned char DeadDriveCount; /* Byte 148 */
+ unsigned char :8; /* Byte 149 */
+ unsigned char RebuildCount; /* Byte 150 */
+ struct {
+ unsigned char :3; /* Byte 151 Bits 0-2 */
+ bool BatteryBackupUnitPresent:1; /* Byte 151 Bit 3 */
+ unsigned char :3; /* Byte 151 Bits 4-6 */
+ unsigned char :1; /* Byte 151 Bit 7 */
+ } MiscFlags;
+ struct {
+ unsigned char TargetID;
+ unsigned char Channel;
+ } DeadDrives[21]; /* Bytes 152-193 */
+ unsigned char Reserved[62]; /* Bytes 194-255 */
+}
+__attribute__ ((packed))
+DAC960_V1_Enquiry_T;
+
+#define DAC960_V1_ControllerIsRebuilding(c) \
+ ((c)->V1.Enquiry.RebuildFlag == DAC960_V1_BackgroundRebuildInProgress)
+#define DAC960_V1_ControllerConsistencyCheck(c) \
+ ((c)->V1.Enquiry.RebuildFlag == DAC960_V1_BackgroundCheckInProgress)
+
+/*
+ Define the DAC960 V1 Firmware Enquiry2 Command reply structure.
+*/
+
+typedef struct DAC960_V1_Enquiry2
+{
+ struct {
+ enum {
+ DAC960_V1_P_PD_PU = 0x01,
+ DAC960_V1_PL = 0x02,
+ DAC960_V1_PG = 0x10,
+ DAC960_V1_PJ = 0x11,
+ DAC960_V1_PR = 0x12,
+ DAC960_V1_PT = 0x13,
+ DAC960_V1_PTL0 = 0x14,
+ DAC960_V1_PRL = 0x15,
+ DAC960_V1_PTL1 = 0x16,
+ DAC960_V1_1164P = 0x20
+ } __attribute__ ((packed)) SubModel; /* Byte 0 */
+ unsigned char ActualChannels; /* Byte 1 */
+ enum {
+ DAC960_V1_FiveChannelBoard = 0x01,
+ DAC960_V1_ThreeChannelBoard = 0x02,
+ DAC960_V1_TwoChannelBoard = 0x03,
+ DAC960_V1_ThreeChannelASIC_DAC = 0x04
+ } __attribute__ ((packed)) Model; /* Byte 2 */
+ enum {
+ DAC960_V1_EISA_Controller = 0x01,
+ DAC960_V1_MicroChannel_Controller = 0x02,
+ DAC960_V1_PCI_Controller = 0x03,
+ DAC960_V1_SCSItoSCSI_Controller = 0x08
+ } __attribute__ ((packed)) ProductFamily; /* Byte 3 */
+ } HardwareID; /* Bytes 0-3 */
+ /* MajorVersion.MinorVersion-FirmwareType-TurnID */
+ struct {
+ unsigned char MajorVersion; /* Byte 4 */
+ unsigned char MinorVersion; /* Byte 5 */
+ unsigned char TurnID; /* Byte 6 */
+ char FirmwareType; /* Byte 7 */
+ } FirmwareID; /* Bytes 4-7 */
+ unsigned char :8; /* Byte 8 */
+ unsigned int :24; /* Bytes 9-11 */
+ unsigned char ConfiguredChannels; /* Byte 12 */
+ unsigned char ActualChannels; /* Byte 13 */
+ unsigned char MaxTargets; /* Byte 14 */
+ unsigned char MaxTags; /* Byte 15 */
+ unsigned char MaxLogicalDrives; /* Byte 16 */
+ unsigned char MaxArms; /* Byte 17 */
+ unsigned char MaxSpans; /* Byte 18 */
+ unsigned char :8; /* Byte 19 */
+ unsigned int :32; /* Bytes 20-23 */
+ unsigned int MemorySize; /* Bytes 24-27 */
+ unsigned int CacheSize; /* Bytes 28-31 */
+ unsigned int FlashMemorySize; /* Bytes 32-35 */
+ unsigned int NonVolatileMemorySize; /* Bytes 36-39 */
+ struct {
+ enum {
+ DAC960_V1_RamType_DRAM = 0x0,
+ DAC960_V1_RamType_EDO = 0x1,
+ DAC960_V1_RamType_SDRAM = 0x2,
+ DAC960_V1_RamType_Last = 0x7
+ } __attribute__ ((packed)) RamType:3; /* Byte 40 Bits 0-2 */
+ enum {
+ DAC960_V1_ErrorCorrection_None = 0x0,
+ DAC960_V1_ErrorCorrection_Parity = 0x1,
+ DAC960_V1_ErrorCorrection_ECC = 0x2,
+ DAC960_V1_ErrorCorrection_Last = 0x7
+ } __attribute__ ((packed)) ErrorCorrection:3; /* Byte 40 Bits 3-5 */
+ bool FastPageMode:1; /* Byte 40 Bit 6 */
+ bool LowPowerMemory:1; /* Byte 40 Bit 7 */
+ unsigned char :8; /* Byte 41 */
+ } MemoryType;
+ unsigned short ClockSpeed; /* Bytes 42-43 */
+ unsigned short MemorySpeed; /* Bytes 44-45 */
+ unsigned short HardwareSpeed; /* Bytes 46-47 */
+ unsigned int :32; /* Bytes 48-51 */
+ unsigned int :32; /* Bytes 52-55 */
+ unsigned char :8; /* Byte 56 */
+ unsigned char :8; /* Byte 57 */
+ unsigned short :16; /* Bytes 58-59 */
+ unsigned short MaxCommands; /* Bytes 60-61 */
+ unsigned short MaxScatterGatherEntries; /* Bytes 62-63 */
+ unsigned short MaxDriveCommands; /* Bytes 64-65 */
+ unsigned short MaxIODescriptors; /* Bytes 66-67 */
+ unsigned short MaxCombinedSectors; /* Bytes 68-69 */
+ unsigned char Latency; /* Byte 70 */
+ unsigned char :8; /* Byte 71 */
+ unsigned char SCSITimeout; /* Byte 72 */
+ unsigned char :8; /* Byte 73 */
+ unsigned short MinFreeLines; /* Bytes 74-75 */
+ unsigned int :32; /* Bytes 76-79 */
+ unsigned int :32; /* Bytes 80-83 */
+ unsigned char RebuildRateConstant; /* Byte 84 */
+ unsigned char :8; /* Byte 85 */
+ unsigned char :8; /* Byte 86 */
+ unsigned char :8; /* Byte 87 */
+ unsigned int :32; /* Bytes 88-91 */
+ unsigned int :32; /* Bytes 92-95 */
+ unsigned short PhysicalDriveBlockSize; /* Bytes 96-97 */
+ unsigned short LogicalDriveBlockSize; /* Bytes 98-99 */
+ unsigned short MaxBlocksPerCommand; /* Bytes 100-101 */
+ unsigned short BlockFactor; /* Bytes 102-103 */
+ unsigned short CacheLineSize; /* Bytes 104-105 */
+ struct {
+ enum {
+ DAC960_V1_Narrow_8bit = 0x0,
+ DAC960_V1_Wide_16bit = 0x1,
+ DAC960_V1_Wide_32bit = 0x2
+ } __attribute__ ((packed)) BusWidth:2; /* Byte 106 Bits 0-1 */
+ enum {
+ DAC960_V1_Fast = 0x0,
+ DAC960_V1_Ultra = 0x1,
+ DAC960_V1_Ultra2 = 0x2
+ } __attribute__ ((packed)) BusSpeed:2; /* Byte 106 Bits 2-3 */
+ bool Differential:1; /* Byte 106 Bit 4 */
+ unsigned char :3; /* Byte 106 Bits 5-7 */
+ } SCSICapability;
+ unsigned char :8; /* Byte 107 */
+ unsigned int :32; /* Bytes 108-111 */
+ unsigned short FirmwareBuildNumber; /* Bytes 112-113 */
+ enum {
+ DAC960_V1_AEMI = 0x01,
+ DAC960_V1_OEM1 = 0x02,
+ DAC960_V1_OEM2 = 0x04,
+ DAC960_V1_OEM3 = 0x08,
+ DAC960_V1_Conner = 0x10,
+ DAC960_V1_SAFTE = 0x20
+ } __attribute__ ((packed)) FaultManagementType; /* Byte 114 */
+ unsigned char :8; /* Byte 115 */
+ struct {
+ bool Clustering:1; /* Byte 116 Bit 0 */
+ bool MylexOnlineRAIDExpansion:1; /* Byte 116 Bit 1 */
+ bool ReadAhead:1; /* Byte 116 Bit 2 */
+ bool BackgroundInitialization:1; /* Byte 116 Bit 3 */
+ unsigned int :28; /* Bytes 116-119 */
+ } FirmwareFeatures;
+ unsigned int :32; /* Bytes 120-123 */
+ unsigned int :32; /* Bytes 124-127 */
+}
+DAC960_V1_Enquiry2_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Logical Drive State type.
+*/
+
+typedef enum
+{
+ DAC960_V1_Device_Dead = 0x00,
+ DAC960_V1_Device_WriteOnly = 0x02,
+ DAC960_V1_Device_Online = 0x03,
+ DAC960_V1_Device_Critical = 0x04,
+ DAC960_V1_Device_Standby = 0x10,
+ DAC960_V1_Device_Offline = 0xFF
+}
+__attribute__ ((packed))
+DAC960_V1_DriveState_T;
+
+
+/*
+ * Define the DAC960 V1 RAID Levels
+ */
+typedef enum {
+ DAC960_V1_RAID_Level0 = 0x0, /* RAID 0 */
+ DAC960_V1_RAID_Level1 = 0x1, /* RAID 1 */
+ DAC960_V1_RAID_Level3 = 0x3, /* RAID 3 */
+ DAC960_V1_RAID_Level5 = 0x5, /* RAID 5 */
+ DAC960_V1_RAID_Level6 = 0x6, /* RAID 6 */
+ DAC960_V1_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+}
+__attribute__ ((packed))
+DAC960_V1_RAIDLevel_T;
+
+/*
+ Define the DAC960 V1 Firmware Logical Drive Information structure.
+*/
+
+typedef struct DAC960_V1_LogicalDeviceInfo
+{
+ unsigned int Size; /* Bytes 0-3 */
+ DAC960_V1_DriveState_T State; /* Byte 4 */
+ unsigned char RAIDLevel:7; /* Byte 5 Bits 0-6 */
+ bool WriteBack:1; /* Byte 5 Bit 7 */
+ unsigned short :16; /* Bytes 6-7 */
+}
+DAC960_V1_LogicalDeviceInfo_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Get Logical Drive Information Command
+ reply structure.
+*/
+
+typedef DAC960_V1_LogicalDeviceInfo_T
+DAC960_V1_LogicalDeviceInfoArray_T[DAC960_MaxLogicalDrives];
+
+
+/*
+ Define the DAC960 V1 Firmware Perform Event Log Operation Types.
+*/
+
+typedef enum
+{
+ DAC960_V1_GetEventLogEntry = 0x00
+}
+__attribute__ ((packed))
+DAC960_V1_PerformEventLogOpType_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Get Event Log Entry Command reply structure.
+*/
+
+typedef struct DAC960_V1_EventLogEntry
+{
+ unsigned char MessageType; /* Byte 0 */
+ unsigned char MessageLength; /* Byte 1 */
+ unsigned char TargetID:5; /* Byte 2 Bits 0-4 */
+ unsigned char Channel:3; /* Byte 2 Bits 5-7 */
+ unsigned char LogicalUnit:6; /* Byte 3 Bits 0-5 */
+ unsigned char rsvd1:2; /* Byte 3 Bits 6-7 */
+ unsigned short SequenceNumber; /* Bytes 4-5 */
+ unsigned char SenseData[26]; /* Bytes 6-31 */
+}
+DAC960_V1_EventLogEntry_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Get Device State Command reply structure.
+ The structure is padded by 2 bytes for compatibility with Version 2.xx
+ Firmware.
+*/
+
+typedef struct DAC960_V1_DeviceState
+{
+ bool Present:1; /* Byte 0 Bit 0 */
+ unsigned char :7; /* Byte 0 Bits 1-7 */
+ enum {
+ DAC960_V1_OtherType = 0x0,
+ DAC960_V1_DiskType = 0x1,
+ DAC960_V1_SequentialType = 0x2,
+ DAC960_V1_CDROM_or_WORM_Type = 0x3
+ } __attribute__ ((packed)) DeviceType:2; /* Byte 1 Bits 0-1 */
+ bool rsvd1:1; /* Byte 1 Bit 2 */
+ bool Fast20:1; /* Byte 1 Bit 3 */
+ bool Sync:1; /* Byte 1 Bit 4 */
+ bool Fast:1; /* Byte 1 Bit 5 */
+ bool Wide:1; /* Byte 1 Bit 6 */
+ bool TaggedQueuingSupported:1; /* Byte 1 Bit 7 */
+ DAC960_V1_DriveState_T State; /* Byte 2 */
+ unsigned char rsvd2:8; /* Byte 3 */
+ unsigned char SynchronousMultiplier; /* Byte 4 */
+ unsigned char SynchronousOffset:5; /* Byte 5 Bits 0-4 */
+ unsigned char rsvd3:3; /* Byte 5 Bits 5-7 */
+ unsigned int Size __attribute__ ((packed)); /* Bytes 6-9 */
+ unsigned short rsvd4:16; /* Bytes 10-11 */
+}
+DAC960_V1_DeviceState_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
+*/
+
+typedef struct DAC960_V1_RebuildProgress
+{
+ unsigned int LogicalDriveNumber; /* Bytes 0-3 */
+ unsigned int LogicalDriveSize; /* Bytes 4-7 */
+ unsigned int RemainingBlocks; /* Bytes 8-11 */
+}
+DAC960_V1_RebuildProgress_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Background Initialization Status Command
+ reply structure.
+*/
+
+typedef struct DAC960_V1_BackgroundInitializationStatus
+{
+ unsigned int LogicalDriveSize; /* Bytes 0-3 */
+ unsigned int BlocksCompleted; /* Bytes 4-7 */
+ unsigned char Reserved1[12]; /* Bytes 8-19 */
+ unsigned int LogicalDriveNumber; /* Bytes 20-23 */
+ unsigned char RAIDLevel; /* Byte 24 */
+ enum {
+ DAC960_V1_BackgroundInitializationInvalid = 0x00,
+ DAC960_V1_BackgroundInitializationStarted = 0x02,
+ DAC960_V1_BackgroundInitializationInProgress = 0x04,
+ DAC960_V1_BackgroundInitializationSuspended = 0x05,
+ DAC960_V1_BackgroundInitializationCancelled = 0x06
+ } __attribute__ ((packed)) Status; /* Byte 25 */
+ unsigned char Reserved2[6]; /* Bytes 26-31 */
+}
+DAC960_V1_BackgroundInitializationStatus_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Error Table Entry structure.
+*/
+
+typedef struct DAC960_V1_ErrorTableEntry
+{
+ unsigned char ParityErrorCount; /* Byte 0 */
+ unsigned char SoftErrorCount; /* Byte 1 */
+ unsigned char HardErrorCount; /* Byte 2 */
+ unsigned char MiscErrorCount; /* Byte 3 */
+}
+DAC960_V1_ErrorTableEntry_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Get Error Table Command reply structure.
+*/
+
+typedef struct DAC960_V1_ErrorTable
+{
+ DAC960_V1_ErrorTableEntry_T
+ ErrorTableEntries[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
+}
+DAC960_V1_ErrorTable_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Read Config2 Command reply structure.
+*/
+
+typedef struct DAC960_V1_Config2
+{
+ unsigned char :1; /* Byte 0 Bit 0 */
+ bool ActiveNegationEnabled:1; /* Byte 0 Bit 1 */
+ unsigned char :5; /* Byte 0 Bits 2-6 */
+ bool NoRescanIfResetReceivedDuringScan:1; /* Byte 0 Bit 7 */
+ bool StorageWorksSupportEnabled:1; /* Byte 1 Bit 0 */
+ bool HewlettPackardSupportEnabled:1; /* Byte 1 Bit 1 */
+ bool NoDisconnectOnFirstCommand:1; /* Byte 1 Bit 2 */
+ unsigned char :2; /* Byte 1 Bits 3-4 */
+ bool AEMI_ARM:1; /* Byte 1 Bit 5 */
+ bool AEMI_OFM:1; /* Byte 1 Bit 6 */
+ unsigned char :1; /* Byte 1 Bit 7 */
+ enum {
+ DAC960_V1_OEMID_Mylex = 0x00,
+ DAC960_V1_OEMID_IBM = 0x08,
+ DAC960_V1_OEMID_HP = 0x0A,
+ DAC960_V1_OEMID_DEC = 0x0C,
+ DAC960_V1_OEMID_Siemens = 0x10,
+ DAC960_V1_OEMID_Intel = 0x12
+ } __attribute__ ((packed)) OEMID; /* Byte 2 */
+ unsigned char OEMModelNumber; /* Byte 3 */
+ unsigned char PhysicalSector; /* Byte 4 */
+ unsigned char LogicalSector; /* Byte 5 */
+ unsigned char BlockFactor; /* Byte 6 */
+ bool ReadAheadEnabled:1; /* Byte 7 Bit 0 */
+ bool LowBIOSDelay:1; /* Byte 7 Bit 1 */
+ unsigned char :2; /* Byte 7 Bits 2-3 */
+ bool ReassignRestrictedToOneSector:1; /* Byte 7 Bit 4 */
+ unsigned char :1; /* Byte 7 Bit 5 */
+ bool ForceUnitAccessDuringWriteRecovery:1; /* Byte 7 Bit 6 */
+ bool EnableLeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
+ unsigned char DefaultRebuildRate; /* Byte 8 */
+ unsigned char :8; /* Byte 9 */
+ unsigned char BlocksPerCacheLine; /* Byte 10 */
+ unsigned char BlocksPerStripe; /* Byte 11 */
+ struct {
+ enum {
+ DAC960_V1_Async = 0x0,
+ DAC960_V1_Sync_8MHz = 0x1,
+ DAC960_V1_Sync_5MHz = 0x2,
+ DAC960_V1_Sync_10or20MHz = 0x3
+ } __attribute__ ((packed)) Speed:2; /* Byte 12 Bits 0-1 */
+ bool Force8Bit:1; /* Byte 12 Bit 2 */
+ bool DisableFast20:1; /* Byte 12 Bit 3 */
+ unsigned char :3; /* Byte 12 Bits 4-6 */
+ bool EnableTaggedQueuing:1; /* Byte 12 Bit 7 */
+ } __attribute__ ((packed)) ChannelParameters[6]; /* Bytes 12-17 */
+ unsigned char SCSIInitiatorID; /* Byte 18 */
+ unsigned char :8; /* Byte 19 */
+ enum {
+ DAC960_V1_StartupMode_ControllerSpinUp = 0x00,
+ DAC960_V1_StartupMode_PowerOnSpinUp = 0x01
+ } __attribute__ ((packed)) StartupMode; /* Byte 20 */
+ unsigned char SimultaneousDeviceSpinUpCount; /* Byte 21 */
+ unsigned char SecondsDelayBetweenSpinUps; /* Byte 22 */
+ unsigned char Reserved1[29]; /* Bytes 23-51 */
+ bool BIOSDisabled:1; /* Byte 52 Bit 0 */
+ bool CDROMBootEnabled:1; /* Byte 52 Bit 1 */
+ unsigned char :3; /* Byte 52 Bits 2-4 */
+ enum {
+ DAC960_V1_Geometry_128_32 = 0x0,
+ DAC960_V1_Geometry_255_63 = 0x1,
+ DAC960_V1_Geometry_Reserved1 = 0x2,
+ DAC960_V1_Geometry_Reserved2 = 0x3
+ } __attribute__ ((packed)) DriveGeometry:2; /* Byte 52 Bits 5-6 */
+ unsigned char :1; /* Byte 52 Bit 7 */
+ unsigned char Reserved2[9]; /* Bytes 53-61 */
+ unsigned short Checksum; /* Bytes 62-63 */
+}
+DAC960_V1_Config2_T;
+
+
+/*
+ Define the DAC960 V1 Firmware DCDB request structure.
+*/
+
+typedef struct DAC960_V1_DCDB
+{
+ unsigned char TargetID:4; /* Byte 0 Bits 0-3 */
+ unsigned char Channel:4; /* Byte 0 Bits 4-7 */
+ enum {
+ DAC960_V1_DCDB_NoDataTransfer = 0,
+ DAC960_V1_DCDB_DataTransferDeviceToSystem = 1,
+ DAC960_V1_DCDB_DataTransferSystemToDevice = 2,
+ DAC960_V1_DCDB_IllegalDataTransfer = 3
+ } __attribute__ ((packed)) Direction:2; /* Byte 1 Bits 0-1 */
+ bool EarlyStatus:1; /* Byte 1 Bit 2 */
+ unsigned char :1; /* Byte 1 Bit 3 */
+ enum {
+ DAC960_V1_DCDB_Timeout_24_hours = 0,
+ DAC960_V1_DCDB_Timeout_10_seconds = 1,
+ DAC960_V1_DCDB_Timeout_60_seconds = 2,
+ DAC960_V1_DCDB_Timeout_10_minutes = 3
+ } __attribute__ ((packed)) Timeout:2; /* Byte 1 Bits 4-5 */
+ bool NoAutomaticRequestSense:1; /* Byte 1 Bit 6 */
+ bool DisconnectPermitted:1; /* Byte 1 Bit 7 */
+ unsigned short TransferLength; /* Bytes 2-3 */
+ u32 BusAddress; /* Bytes 4-7 */
+ unsigned char CDBLength:4; /* Byte 8 Bits 0-3 */
+ unsigned char TransferLengthHigh4:4; /* Byte 8 Bits 4-7 */
+ unsigned char SenseLength; /* Byte 9 */
+ unsigned char CDB[12]; /* Bytes 10-21 */
+ unsigned char SenseData[64]; /* Bytes 22-85 */
+ unsigned char Status; /* Byte 86 */
+ unsigned char :8; /* Byte 87 */
+}
+DAC960_V1_DCDB_T;
+
+
+/*
+ Define the DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
+ 32 Bit Byte Count structure.
+*/
+
+typedef struct DAC960_V1_ScatterGatherSegment
+{
+ u32 SegmentDataPointer; /* Bytes 0-3 */
+ u32 SegmentByteCount; /* Bytes 4-7 */
+}
+DAC960_V1_ScatterGatherSegment_T;
+
+
+/*
+ Define the 13 Byte DAC960 V1 Firmware Command Mailbox structure. Bytes 13-15
+ are not used. The Command Mailbox structure is padded to 16 bytes for
+ efficient access.
+*/
+
+typedef union DAC960_V1_CommandMailbox
+{
+ unsigned int Words[4]; /* Words 0-3 */
+ unsigned char Bytes[16]; /* Bytes 0-15 */
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char Dummy[14]; /* Bytes 2-15 */
+ } __attribute__ ((packed)) Common;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char Dummy1[6]; /* Bytes 2-7 */
+ u32 BusAddress; /* Bytes 8-11 */
+ unsigned char Dummy2[4]; /* Bytes 12-15 */
+ } __attribute__ ((packed)) Type3;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char CommandOpcode2; /* Byte 2 */
+ unsigned char Dummy1[5]; /* Bytes 3-7 */
+ u32 BusAddress; /* Bytes 8-11 */
+ unsigned char Dummy2[4]; /* Bytes 12-15 */
+ } __attribute__ ((packed)) Type3B;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char Dummy1[5]; /* Bytes 2-6 */
+ unsigned char LogicalDriveNumber:6; /* Byte 7 Bits 0-6 */
+ bool AutoRestore:1; /* Byte 7 Bit 7 */
+ unsigned char Dummy2[8]; /* Bytes 8-15 */
+ } __attribute__ ((packed)) Type3C;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char Channel; /* Byte 2 */
+ unsigned char TargetID; /* Byte 3 */
+ DAC960_V1_DriveState_T State; /* Byte 4 Bits */
+ unsigned char Dummy1[3]; /* Bytes 5-7 */
+ u32 BusAddress; /* Bytes 8-11 */
+ unsigned char Dummy2[4]; /* Bytes 12-15 */
+ } __attribute__ ((packed)) Type3D;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ DAC960_V1_PerformEventLogOpType_T OperationType; /* Byte 2 */
+ unsigned char OperationQualifier; /* Byte 3 */
+ unsigned short SequenceNumber; /* Bytes 4-5 */
+ unsigned char Dummy1[2]; /* Bytes 6-7 */
+ u32 BusAddress; /* Bytes 8-11 */
+ unsigned char Dummy2[4]; /* Bytes 12-15 */
+ } __attribute__ ((packed)) Type3E;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char Dummy1[2]; /* Bytes 2-3 */
+ unsigned char RebuildRateConstant; /* Byte 4 */
+ unsigned char Dummy2[3]; /* Bytes 5-7 */
+ u32 BusAddress; /* Bytes 8-11 */
+ unsigned char Dummy3[4]; /* Bytes 12-15 */
+ } __attribute__ ((packed)) Type3R;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned short TransferLength; /* Bytes 2-3 */
+ unsigned int LogicalBlockAddress; /* Bytes 4-7 */
+ u32 BusAddress; /* Bytes 8-11 */
+ unsigned char LogicalDriveNumber; /* Byte 12 */
+ unsigned char Dummy[3]; /* Bytes 13-15 */
+ } __attribute__ ((packed)) Type4;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ struct {
+ unsigned short TransferLength:11; /* Bytes 2-3 */
+ unsigned char LogicalDriveNumber:5; /* Byte 3 Bits 3-7 */
+ } __attribute__ ((packed)) LD;
+ unsigned int LogicalBlockAddress; /* Bytes 4-7 */
+ u32 BusAddress; /* Bytes 8-11 */
+ unsigned char ScatterGatherCount:6; /* Byte 12 Bits 0-5 */
+ enum {
+ DAC960_V1_ScatterGather_32BitAddress_32BitByteCount = 0x0,
+ DAC960_V1_ScatterGather_32BitAddress_16BitByteCount = 0x1,
+ DAC960_V1_ScatterGather_32BitByteCount_32BitAddress = 0x2,
+ DAC960_V1_ScatterGather_16BitByteCount_32BitAddress = 0x3
+ } __attribute__ ((packed)) ScatterGatherType:2; /* Byte 12 Bits 6-7 */
+ unsigned char Dummy[3]; /* Bytes 13-15 */
+ } __attribute__ ((packed)) Type5;
+ struct {
+ DAC960_V1_CommandOpcode_T opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char CommandOpcode2; /* Byte 2 */
+ unsigned char :8; /* Byte 3 */
+ u32 CommandMailboxesBusAddress; /* Bytes 4-7 */
+ u32 StatusMailboxesBusAddress; /* Bytes 8-11 */
+ unsigned char Dummy[4]; /* Bytes 12-15 */
+ } __attribute__ ((packed)) TypeX;
+}
+DAC960_V1_CommandMailbox_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Command Opcodes.
+*/
+
+typedef enum
+{
+ DAC960_V2_MemCopy = 0x01,
+ DAC960_V2_SCSI_10_Passthru = 0x02,
+ DAC960_V2_SCSI_255_Passthru = 0x03,
+ DAC960_V2_SCSI_10 = 0x04,
+ DAC960_V2_SCSI_256 = 0x05,
+ DAC960_V2_IOCTL = 0x20
+}
+__attribute__ ((packed))
+DAC960_V2_CommandOpcode_T;
+
+
+/*
+ Define the DAC960 V2 Firmware IOCTL Opcodes.
+*/
+
+typedef enum
+{
+ DAC960_V2_GetControllerInfo = 0x01,
+ DAC960_V2_GetLogicalDeviceInfoValid = 0x03,
+ DAC960_V2_GetPhysicalDeviceInfoValid = 0x05,
+ DAC960_V2_GetHealthStatus = 0x11,
+ DAC960_V2_GetEvent = 0x15,
+ DAC960_V2_StartDiscovery = 0x81,
+ DAC960_V2_SetDeviceState = 0x82,
+ DAC960_V2_InitPhysicalDeviceStart = 0x84,
+ DAC960_V2_InitPhysicalDeviceStop = 0x85,
+ DAC960_V2_InitLogicalDeviceStart = 0x86,
+ DAC960_V2_InitLogicalDeviceStop = 0x87,
+ DAC960_V2_RebuildDeviceStart = 0x88,
+ DAC960_V2_RebuildDeviceStop = 0x89,
+ DAC960_V2_MakeConsistencDataStart = 0x8A,
+ DAC960_V2_MakeConsistencDataStop = 0x8B,
+ DAC960_V2_ConsistencyCheckStart = 0x8C,
+ DAC960_V2_ConsistencyCheckStop = 0x8D,
+ DAC960_V2_SetMemoryMailbox = 0x8E,
+ DAC960_V2_ResetDevice = 0x90,
+ DAC960_V2_FlushDeviceData = 0x91,
+ DAC960_V2_PauseDevice = 0x92,
+ DAC960_V2_UnPauseDevice = 0x93,
+ DAC960_V2_LocateDevice = 0x94,
+ DAC960_V2_CreateNewConfiguration = 0xC0,
+ DAC960_V2_DeleteLogicalDevice = 0xC1,
+ DAC960_V2_ReplaceInternalDevice = 0xC2,
+ DAC960_V2_RenameLogicalDevice = 0xC3,
+ DAC960_V2_AddNewConfiguration = 0xC4,
+ DAC960_V2_TranslatePhysicalToLogicalDevice = 0xC5,
+ DAC960_V2_ClearConfiguration = 0xCA,
+}
+__attribute__ ((packed))
+DAC960_V2_IOCTL_Opcode_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Command Status Codes.
+*/
+
+#define DAC960_V2_NormalCompletion 0x00
+#define DAC960_V2_AbnormalCompletion 0x02
+#define DAC960_V2_DeviceBusy 0x08
+#define DAC960_V2_DeviceNonresponsive 0x0E
+#define DAC960_V2_DeviceNonresponsive2 0x0F
+#define DAC960_V2_DeviceRevervationConflict 0x18
+
+
+/*
+ Define the DAC960 V2 Firmware Memory Type structure.
+*/
+
+typedef struct DAC960_V2_MemoryType
+{
+ enum {
+ DAC960_V2_MemoryType_Reserved = 0x00,
+ DAC960_V2_MemoryType_DRAM = 0x01,
+ DAC960_V2_MemoryType_EDRAM = 0x02,
+ DAC960_V2_MemoryType_EDO = 0x03,
+ DAC960_V2_MemoryType_SDRAM = 0x04,
+ DAC960_V2_MemoryType_Last = 0x1F
+ } __attribute__ ((packed)) MemoryType:5; /* Byte 0 Bits 0-4 */
+ bool :1; /* Byte 0 Bit 5 */
+ bool MemoryParity:1; /* Byte 0 Bit 6 */
+ bool MemoryECC:1; /* Byte 0 Bit 7 */
+}
+DAC960_V2_MemoryType_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Processor Type structure.
+*/
+
+typedef enum
+{
+ DAC960_V2_ProcessorType_i960CA = 0x01,
+ DAC960_V2_ProcessorType_i960RD = 0x02,
+ DAC960_V2_ProcessorType_i960RN = 0x03,
+ DAC960_V2_ProcessorType_i960RP = 0x04,
+ DAC960_V2_ProcessorType_NorthBay = 0x05,
+ DAC960_V2_ProcessorType_StrongArm = 0x06,
+ DAC960_V2_ProcessorType_i960RM = 0x07
+}
+__attribute__ ((packed))
+DAC960_V2_ProcessorType_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Get Controller Info reply structure.
+*/
+
+typedef struct DAC960_V2_ControllerInfo
+{
+ unsigned char :8; /* Byte 0 */
+ enum {
+ DAC960_V2_SCSI_Bus = 0x00,
+ DAC960_V2_Fibre_Bus = 0x01,
+ DAC960_V2_PCI_Bus = 0x03
+ } __attribute__ ((packed)) BusInterfaceType; /* Byte 1 */
+ enum {
+ DAC960_V2_DAC960E = 0x01,
+ DAC960_V2_DAC960M = 0x08,
+ DAC960_V2_DAC960PD = 0x10,
+ DAC960_V2_DAC960PL = 0x11,
+ DAC960_V2_DAC960PU = 0x12,
+ DAC960_V2_DAC960PE = 0x13,
+ DAC960_V2_DAC960PG = 0x14,
+ DAC960_V2_DAC960PJ = 0x15,
+ DAC960_V2_DAC960PTL0 = 0x16,
+ DAC960_V2_DAC960PR = 0x17,
+ DAC960_V2_DAC960PRL = 0x18,
+ DAC960_V2_DAC960PT = 0x19,
+ DAC960_V2_DAC1164P = 0x1A,
+ DAC960_V2_DAC960PTL1 = 0x1B,
+ DAC960_V2_EXR2000P = 0x1C,
+ DAC960_V2_EXR3000P = 0x1D,
+ DAC960_V2_AcceleRAID352 = 0x1E,
+ DAC960_V2_AcceleRAID170 = 0x1F,
+ DAC960_V2_AcceleRAID160 = 0x20,
+ DAC960_V2_DAC960S = 0x60,
+ DAC960_V2_DAC960SU = 0x61,
+ DAC960_V2_DAC960SX = 0x62,
+ DAC960_V2_DAC960SF = 0x63,
+ DAC960_V2_DAC960SS = 0x64,
+ DAC960_V2_DAC960FL = 0x65,
+ DAC960_V2_DAC960LL = 0x66,
+ DAC960_V2_DAC960FF = 0x67,
+ DAC960_V2_DAC960HP = 0x68,
+ DAC960_V2_RAIDBRICK = 0x69,
+ DAC960_V2_METEOR_FL = 0x6A,
+ DAC960_V2_METEOR_FF = 0x6B
+ } __attribute__ ((packed)) ControllerType; /* Byte 2 */
+ unsigned char :8; /* Byte 3 */
+ unsigned short BusInterfaceSpeedMHz; /* Bytes 4-5 */
+ unsigned char BusWidthBits; /* Byte 6 */
+ unsigned char FlashCodeTypeOrProductID; /* Byte 7 */
+ unsigned char NumberOfHostPortsPresent; /* Byte 8 */
+ unsigned char Reserved1[7]; /* Bytes 9-15 */
+ unsigned char BusInterfaceName[16]; /* Bytes 16-31 */
+ unsigned char ControllerName[16]; /* Bytes 32-47 */
+ unsigned char Reserved2[16]; /* Bytes 48-63 */
+ /* Firmware Release Information */
+ unsigned char FirmwareMajorVersion; /* Byte 64 */
+ unsigned char FirmwareMinorVersion; /* Byte 65 */
+ unsigned char FirmwareTurnNumber; /* Byte 66 */
+ unsigned char FirmwareBuildNumber; /* Byte 67 */
+ unsigned char FirmwareReleaseDay; /* Byte 68 */
+ unsigned char FirmwareReleaseMonth; /* Byte 69 */
+ unsigned char FirmwareReleaseYearHigh2Digits; /* Byte 70 */
+ unsigned char FirmwareReleaseYearLow2Digits; /* Byte 71 */
+ /* Hardware Release Information */
+ unsigned char HardwareRevision; /* Byte 72 */
+ unsigned int :24; /* Bytes 73-75 */
+ unsigned char HardwareReleaseDay; /* Byte 76 */
+ unsigned char HardwareReleaseMonth; /* Byte 77 */
+ unsigned char HardwareReleaseYearHigh2Digits; /* Byte 78 */
+ unsigned char HardwareReleaseYearLow2Digits; /* Byte 79 */
+ /* Hardware Manufacturing Information */
+ unsigned char ManufacturingBatchNumber; /* Byte 80 */
+ unsigned char :8; /* Byte 81 */
+ unsigned char ManufacturingPlantNumber; /* Byte 82 */
+ unsigned char :8; /* Byte 83 */
+ unsigned char HardwareManufacturingDay; /* Byte 84 */
+ unsigned char HardwareManufacturingMonth; /* Byte 85 */
+ unsigned char HardwareManufacturingYearHigh2Digits; /* Byte 86 */
+ unsigned char HardwareManufacturingYearLow2Digits; /* Byte 87 */
+ unsigned char MaximumNumberOfPDDperXLD; /* Byte 88 */
+ unsigned char MaximumNumberOfILDperXLD; /* Byte 89 */
+ unsigned short NonvolatileMemorySizeKB; /* Bytes 90-91 */
+ unsigned char MaximumNumberOfXLD; /* Byte 92 */
+ unsigned int :24; /* Bytes 93-95 */
+ /* Unique Information per Controller */
+ unsigned char ControllerSerialNumber[16]; /* Bytes 96-111 */
+ unsigned char Reserved3[16]; /* Bytes 112-127 */
+ /* Vendor Information */
+ unsigned int :24; /* Bytes 128-130 */
+ unsigned char OEM_Code; /* Byte 131 */
+ unsigned char VendorName[16]; /* Bytes 132-147 */
+ /* Other Physical/Controller/Operation Information */
+ bool BBU_Present:1; /* Byte 148 Bit 0 */
+ bool ActiveActiveClusteringMode:1; /* Byte 148 Bit 1 */
+ unsigned char :6; /* Byte 148 Bits 2-7 */
+ unsigned char :8; /* Byte 149 */
+ unsigned short :16; /* Bytes 150-151 */
+ /* Physical Device Scan Information */
+ bool PhysicalScanActive:1; /* Byte 152 Bit 0 */
+ unsigned char :7; /* Byte 152 Bits 1-7 */
+ unsigned char PhysicalDeviceChannelNumber; /* Byte 153 */
+ unsigned char PhysicalDeviceTargetID; /* Byte 154 */
+ unsigned char PhysicalDeviceLogicalUnit; /* Byte 155 */
+ /* Maximum Command Data Transfer Sizes */
+ unsigned short MaximumDataTransferSizeInBlocks; /* Bytes 156-157 */
+ unsigned short MaximumScatterGatherEntries; /* Bytes 158-159 */
+ /* Logical/Physical Device Counts */
+ unsigned short LogicalDevicesPresent; /* Bytes 160-161 */
+ unsigned short LogicalDevicesCritical; /* Bytes 162-163 */
+ unsigned short LogicalDevicesOffline; /* Bytes 164-165 */
+ unsigned short PhysicalDevicesPresent; /* Bytes 166-167 */
+ unsigned short PhysicalDisksPresent; /* Bytes 168-169 */
+ unsigned short PhysicalDisksCritical; /* Bytes 170-171 */
+ unsigned short PhysicalDisksOffline; /* Bytes 172-173 */
+ unsigned short MaximumParallelCommands; /* Bytes 174-175 */
+ /* Channel and Target ID Information */
+ unsigned char NumberOfPhysicalChannelsPresent; /* Byte 176 */
+ unsigned char NumberOfVirtualChannelsPresent; /* Byte 177 */
+ unsigned char NumberOfPhysicalChannelsPossible; /* Byte 178 */
+ unsigned char NumberOfVirtualChannelsPossible; /* Byte 179 */
+ unsigned char MaximumTargetsPerChannel[16]; /* Bytes 180-195 */
+ unsigned char Reserved4[12]; /* Bytes 196-207 */
+ /* Memory/Cache Information */
+ unsigned short MemorySizeMB; /* Bytes 208-209 */
+ unsigned short CacheSizeMB; /* Bytes 210-211 */
+ unsigned int ValidCacheSizeInBytes; /* Bytes 212-215 */
+ unsigned int DirtyCacheSizeInBytes; /* Bytes 216-219 */
+ unsigned short MemorySpeedMHz; /* Bytes 220-221 */
+ unsigned char MemoryDataWidthBits; /* Byte 222 */
+ DAC960_V2_MemoryType_T MemoryType; /* Byte 223 */
+ unsigned char CacheMemoryTypeName[16]; /* Bytes 224-239 */
+ /* Execution Memory Information */
+ unsigned short ExecutionMemorySizeMB; /* Bytes 240-241 */
+ unsigned short ExecutionL2CacheSizeMB; /* Bytes 242-243 */
+ unsigned char Reserved5[8]; /* Bytes 244-251 */
+ unsigned short ExecutionMemorySpeedMHz; /* Bytes 252-253 */
+ unsigned char ExecutionMemoryDataWidthBits; /* Byte 254 */
+ DAC960_V2_MemoryType_T ExecutionMemoryType; /* Byte 255 */
+ unsigned char ExecutionMemoryTypeName[16]; /* Bytes 256-271 */
+ /* First CPU Type Information */
+ unsigned short FirstProcessorSpeedMHz; /* Bytes 272-273 */
+ DAC960_V2_ProcessorType_T FirstProcessorType; /* Byte 274 */
+ unsigned char FirstProcessorCount; /* Byte 275 */
+ unsigned char Reserved6[12]; /* Bytes 276-287 */
+ unsigned char FirstProcessorName[16]; /* Bytes 288-303 */
+ /* Second CPU Type Information */
+ unsigned short SecondProcessorSpeedMHz; /* Bytes 304-305 */
+ DAC960_V2_ProcessorType_T SecondProcessorType; /* Byte 306 */
+ unsigned char SecondProcessorCount; /* Byte 307 */
+ unsigned char Reserved7[12]; /* Bytes 308-319 */
+ unsigned char SecondProcessorName[16]; /* Bytes 320-335 */
+ /* Debugging/Profiling/Command Time Tracing Information */
+ unsigned short CurrentProfilingDataPageNumber; /* Bytes 336-337 */
+ unsigned short ProgramsAwaitingProfilingData; /* Bytes 338-339 */
+ unsigned short CurrentCommandTimeTraceDataPageNumber; /* Bytes 340-341 */
+ unsigned short ProgramsAwaitingCommandTimeTraceData; /* Bytes 342-343 */
+ unsigned char Reserved8[8]; /* Bytes 344-351 */
+ /* Error Counters on Physical Devices */
+ unsigned short PhysicalDeviceBusResets; /* Bytes 352-353 */
+ unsigned short PhysicalDeviceParityErrors; /* Bytes 355-355 */
+ unsigned short PhysicalDeviceSoftErrors; /* Bytes 356-357 */
+ unsigned short PhysicalDeviceCommandsFailed; /* Bytes 358-359 */
+ unsigned short PhysicalDeviceMiscellaneousErrors; /* Bytes 360-361 */
+ unsigned short PhysicalDeviceCommandTimeouts; /* Bytes 362-363 */
+ unsigned short PhysicalDeviceSelectionTimeouts; /* Bytes 364-365 */
+ unsigned short PhysicalDeviceRetriesDone; /* Bytes 366-367 */
+ unsigned short PhysicalDeviceAbortsDone; /* Bytes 368-369 */
+ unsigned short PhysicalDeviceHostCommandAbortsDone; /* Bytes 370-371 */
+ unsigned short PhysicalDevicePredictedFailuresDetected; /* Bytes 372-373 */
+ unsigned short PhysicalDeviceHostCommandsFailed; /* Bytes 374-375 */
+ unsigned short PhysicalDeviceHardErrors; /* Bytes 376-377 */
+ unsigned char Reserved9[6]; /* Bytes 378-383 */
+ /* Error Counters on Logical Devices */
+ unsigned short LogicalDeviceSoftErrors; /* Bytes 384-385 */
+ unsigned short LogicalDeviceCommandsFailed; /* Bytes 386-387 */
+ unsigned short LogicalDeviceHostCommandAbortsDone; /* Bytes 388-389 */
+ unsigned short :16; /* Bytes 390-391 */
+ /* Error Counters on Controller */
+ unsigned short ControllerMemoryErrors; /* Bytes 392-393 */
+ unsigned short ControllerHostCommandAbortsDone; /* Bytes 394-395 */
+ unsigned int :32; /* Bytes 396-399 */
+ /* Long Duration Activity Information */
+ unsigned short BackgroundInitializationsActive; /* Bytes 400-401 */
+ unsigned short LogicalDeviceInitializationsActive; /* Bytes 402-403 */
+ unsigned short PhysicalDeviceInitializationsActive; /* Bytes 404-405 */
+ unsigned short ConsistencyChecksActive; /* Bytes 406-407 */
+ unsigned short RebuildsActive; /* Bytes 408-409 */
+ unsigned short OnlineExpansionsActive; /* Bytes 410-411 */
+ unsigned short PatrolActivitiesActive; /* Bytes 412-413 */
+ unsigned short :16; /* Bytes 414-415 */
+ /* Flash ROM Information */
+ unsigned char FlashType; /* Byte 416 */
+ unsigned char :8; /* Byte 417 */
+ unsigned short FlashSizeMB; /* Bytes 418-419 */
+ unsigned int FlashLimit; /* Bytes 420-423 */
+ unsigned int FlashCount; /* Bytes 424-427 */
+ unsigned int :32; /* Bytes 428-431 */
+ unsigned char FlashTypeName[16]; /* Bytes 432-447 */
+ /* Firmware Run Time Information */
+ unsigned char RebuildRate; /* Byte 448 */
+ unsigned char BackgroundInitializationRate; /* Byte 449 */
+ unsigned char ForegroundInitializationRate; /* Byte 450 */
+ unsigned char ConsistencyCheckRate; /* Byte 451 */
+ unsigned int :32; /* Bytes 452-455 */
+ unsigned int MaximumDP; /* Bytes 456-459 */
+ unsigned int FreeDP; /* Bytes 460-463 */
+ unsigned int MaximumIOP; /* Bytes 464-467 */
+ unsigned int FreeIOP; /* Bytes 468-471 */
+ unsigned short MaximumCombLengthInBlocks; /* Bytes 472-473 */
+ unsigned short NumberOfConfigurationGroups; /* Bytes 474-475 */
+ bool InstallationAbortStatus:1; /* Byte 476 Bit 0 */
+ bool MaintenanceModeStatus:1; /* Byte 476 Bit 1 */
+ unsigned int :24; /* Bytes 476-479 */
+ unsigned char Reserved10[32]; /* Bytes 480-511 */
+ unsigned char Reserved11[512]; /* Bytes 512-1023 */
+}
+DAC960_V2_ControllerInfo_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Device State type.
+*/
+
+typedef enum
+{
+ DAC960_V2_Device_Unconfigured = 0x00,
+ DAC960_V2_Device_Online = 0x01,
+ DAC960_V2_Device_Rebuild = 0x03,
+ DAC960_V2_Device_Missing = 0x04,
+ DAC960_V2_Device_SuspectedCritical = 0x05,
+ DAC960_V2_Device_Offline = 0x08,
+ DAC960_V2_Device_Critical = 0x09,
+ DAC960_V2_Device_SuspectedDead = 0x0C,
+ DAC960_V2_Device_CommandedOffline = 0x10,
+ DAC960_V2_Device_Standby = 0x21,
+ DAC960_V2_Device_InvalidState = 0xFF
+}
+__attribute__ ((packed))
+DAC960_V2_DriveState_T;
+
+/*
+ * Define the DAC960 V2 RAID Levels
+ */
+typedef enum {
+ DAC960_V2_RAID_Level0 = 0x0, /* RAID 0 */
+ DAC960_V2_RAID_Level1 = 0x1, /* RAID 1 */
+ DAC960_V2_RAID_Level3 = 0x3, /* RAID 3 right asymmetric parity */
+ DAC960_V2_RAID_Level5 = 0x5, /* RAID 5 right asymmetric parity */
+ DAC960_V2_RAID_Level6 = 0x6, /* RAID 6 (Mylex RAID 6) */
+ DAC960_V2_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+ DAC960_V2_RAID_NewSpan = 0x8, /* New Mylex SPAN */
+ DAC960_V2_RAID_Level3F = 0x9, /* RAID 3 fixed parity */
+ DAC960_V2_RAID_Level3L = 0xb, /* RAID 3 left symmetric parity */
+ DAC960_V2_RAID_Span = 0xc, /* current spanning implementation */
+ DAC960_V2_RAID_Level5L = 0xd, /* RAID 5 left symmetric parity */
+ DAC960_V2_RAID_LevelE = 0xe, /* RAID E (concatenation) */
+ DAC960_V2_RAID_Physical = 0xf, /* physical device */
+}
+__attribute__ ((packed))
+DAC960_V2_RAIDLevel_T;
+
+/* DAC960 V2 stripe size codes: value n (n >= 1) encodes 512 << (n - 1) bytes. */
+typedef enum {
+	DAC960_V2_StripeSize_0 = 0x0, /* no stripe (RAID 1, RAID 7, etc) */
+	DAC960_V2_StripeSize_512b = 0x1,
+	DAC960_V2_StripeSize_1k = 0x2,
+	DAC960_V2_StripeSize_2k = 0x3,
+	DAC960_V2_StripeSize_4k = 0x4,
+	DAC960_V2_StripeSize_8k = 0x5,
+	DAC960_V2_StripeSize_16k = 0x6,
+	DAC960_V2_StripeSize_32k = 0x7,
+	DAC960_V2_StripeSize_64k = 0x8,
+	DAC960_V2_StripeSize_128k = 0x9,
+	DAC960_V2_StripeSize_256k = 0xa,
+	DAC960_V2_StripeSize_512k = 0xb,
+	DAC960_V2_StripeSize_1m = 0xc,
+} __attribute__ ((packed))
+DAC960_V2_StripeSize_T;
+
+/* DAC960 V2 cache line size codes: value n (n >= 1) encodes 512 << (n - 1) bytes. */
+typedef enum {
+	DAC960_V2_Cacheline_ZERO = 0x0, /* caching cannot be enabled */
+	DAC960_V2_Cacheline_512b = 0x1,
+	DAC960_V2_Cacheline_1k = 0x2,
+	DAC960_V2_Cacheline_2k = 0x3,
+	DAC960_V2_Cacheline_4k = 0x4,
+	DAC960_V2_Cacheline_8k = 0x5,
+	DAC960_V2_Cacheline_16k = 0x6,
+	DAC960_V2_Cacheline_32k = 0x7,
+	DAC960_V2_Cacheline_64k = 0x8,
+} __attribute__ ((packed))
+DAC960_V2_CachelineSize_T;
+
+/*
+ Define the DAC960 V2 Firmware Get Logical Device Info reply structure.
+*/
+
+typedef struct DAC960_V2_LogicalDeviceInfo
+{
+ unsigned char :8; /* Byte 0 */
+ unsigned char Channel; /* Byte 1 */
+ unsigned char TargetID; /* Byte 2 */
+ unsigned char LogicalUnit; /* Byte 3 */
+ DAC960_V2_DriveState_T State; /* Byte 4 */
+ unsigned char RAIDLevel; /* Byte 5 */
+ unsigned char StripeSize; /* Byte 6 */
+ unsigned char CacheLineSize; /* Byte 7 */
+ struct {
+ enum {
+ DAC960_V2_ReadCacheDisabled = 0x0,
+ DAC960_V2_ReadCacheEnabled = 0x1,
+ DAC960_V2_ReadAheadEnabled = 0x2,
+ DAC960_V2_IntelligentReadAheadEnabled = 0x3,
+ DAC960_V2_ReadCache_Last = 0x7
+ } __attribute__ ((packed)) ReadCache:3; /* Byte 8 Bits 0-2 */
+ enum {
+ DAC960_V2_WriteCacheDisabled = 0x0,
+ DAC960_V2_LogicalDeviceReadOnly = 0x1,
+ DAC960_V2_WriteCacheEnabled = 0x2,
+ DAC960_V2_IntelligentWriteCacheEnabled = 0x3,
+ DAC960_V2_WriteCache_Last = 0x7
+ } __attribute__ ((packed)) WriteCache:3; /* Byte 8 Bits 3-5 */
+ bool rsvd1:1; /* Byte 8 Bit 6 */
+ bool LogicalDeviceInitialized:1; /* Byte 8 Bit 7 */
+ } LogicalDeviceControl; /* Byte 8 */
+ /* Logical Device Operations Status */
+ bool ConsistencyCheckInProgress:1; /* Byte 9 Bit 0 */
+ bool RebuildInProgress:1; /* Byte 9 Bit 1 */
+ bool BackgroundInitializationInProgress:1; /* Byte 9 Bit 2 */
+ bool ForegroundInitializationInProgress:1; /* Byte 9 Bit 3 */
+ bool DataMigrationInProgress:1; /* Byte 9 Bit 4 */
+ bool PatrolOperationInProgress:1; /* Byte 9 Bit 5 */
+ unsigned char rsvd2:2; /* Byte 9 Bits 6-7 */
+ unsigned char RAID5WriteUpdate; /* Byte 10 */
+ unsigned char RAID5Algorithm; /* Byte 11 */
+ unsigned short LogicalDeviceNumber; /* Bytes 12-13 */
+ /* BIOS Info */
+ bool BIOSDisabled:1; /* Byte 14 Bit 0 */
+ bool CDROMBootEnabled:1; /* Byte 14 Bit 1 */
+ bool DriveCoercionEnabled:1; /* Byte 14 Bit 2 */
+ bool WriteSameDisabled:1; /* Byte 14 Bit 3 */
+ bool HBA_ModeEnabled:1; /* Byte 14 Bit 4 */
+ enum {
+ DAC960_V2_Geometry_128_32 = 0x0,
+ DAC960_V2_Geometry_255_63 = 0x1,
+ DAC960_V2_Geometry_Reserved1 = 0x2,
+ DAC960_V2_Geometry_Reserved2 = 0x3
+ } __attribute__ ((packed)) DriveGeometry:2; /* Byte 14 Bits 5-6 */
+ bool SuperReadAheadEnabled:1; /* Byte 14 Bit 7 */
+ unsigned char rsvd3:8; /* Byte 15 */
+ /* Error Counters */
+ unsigned short SoftErrors; /* Bytes 16-17 */
+ unsigned short CommandsFailed; /* Bytes 18-19 */
+ unsigned short HostCommandAbortsDone; /* Bytes 20-21 */
+ unsigned short DeferredWriteErrors; /* Bytes 22-23 */
+ unsigned int rsvd4:32; /* Bytes 24-27 */
+ unsigned int rsvd5:32; /* Bytes 28-31 */
+ /* Device Size Information */
+ unsigned short rsvd6:16; /* Bytes 32-33 */
+ unsigned short DeviceBlockSizeInBytes; /* Bytes 34-35 */
+ unsigned int OriginalDeviceSize; /* Bytes 36-39 */
+ unsigned int ConfigurableDeviceSize; /* Bytes 40-43 */
+ unsigned int rsvd7:32; /* Bytes 44-47 */
+ unsigned char LogicalDeviceName[32]; /* Bytes 48-79 */
+ unsigned char SCSI_InquiryData[36]; /* Bytes 80-115 */
+ unsigned char Reserved1[12]; /* Bytes 116-127 */
+ u64 LastReadBlockNumber; /* Bytes 128-135 */
+ u64 LastWrittenBlockNumber; /* Bytes 136-143 */
+ u64 ConsistencyCheckBlockNumber; /* Bytes 144-151 */
+ u64 RebuildBlockNumber; /* Bytes 152-159 */
+ u64 BackgroundInitializationBlockNumber; /* Bytes 160-167 */
+ u64 ForegroundInitializationBlockNumber; /* Bytes 168-175 */
+ u64 DataMigrationBlockNumber; /* Bytes 176-183 */
+ u64 PatrolOperationBlockNumber; /* Bytes 184-191 */
+ unsigned char rsvd8[64]; /* Bytes 192-255 */
+}
+DAC960_V2_LogicalDeviceInfo_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Get Physical Device Info reply structure.
+*/
+
+typedef struct DAC960_V2_PhysicalDeviceInfo
+{
+ unsigned char :8; /* Byte 0 */
+ unsigned char Channel; /* Byte 1 */
+ unsigned char TargetID; /* Byte 2 */
+ unsigned char LogicalUnit; /* Byte 3 */
+ /* Configuration Status Bits */
+ bool PhysicalDeviceFaultTolerant:1; /* Byte 4 Bit 0 */
+ bool PhysicalDeviceConnected:1; /* Byte 4 Bit 1 */
+ bool PhysicalDeviceLocalToController:1; /* Byte 4 Bit 2 */
+ unsigned char :5; /* Byte 4 Bits 3-7 */
+ /* Multiple Host/Controller Status Bits */
+ bool RemoteHostSystemDead:1; /* Byte 5 Bit 0 */
+ bool RemoteControllerDead:1; /* Byte 5 Bit 1 */
+ unsigned char :6; /* Byte 5 Bits 2-7 */
+ DAC960_V2_DriveState_T State; /* Byte 6 */
+ unsigned char NegotiatedDataWidthBits; /* Byte 7 */
+ unsigned short NegotiatedSynchronousMegaTransfers; /* Bytes 8-9 */
+ /* Multiported Physical Device Information */
+ unsigned char NumberOfPortConnections; /* Byte 10 */
+ unsigned char DriveAccessibilityBitmap; /* Byte 11 */
+ unsigned int :32; /* Bytes 12-15 */
+ unsigned char NetworkAddress[16]; /* Bytes 16-31 */
+ unsigned short MaximumTags; /* Bytes 32-33 */
+ /* Physical Device Operations Status */
+ bool ConsistencyCheckInProgress:1; /* Byte 34 Bit 0 */
+ bool RebuildInProgress:1; /* Byte 34 Bit 1 */
+ bool MakingDataConsistentInProgress:1; /* Byte 34 Bit 2 */
+ bool PhysicalDeviceInitializationInProgress:1; /* Byte 34 Bit 3 */
+ bool DataMigrationInProgress:1; /* Byte 34 Bit 4 */
+ bool PatrolOperationInProgress:1; /* Byte 34 Bit 5 */
+ unsigned char :2; /* Byte 34 Bits 6-7 */
+ unsigned char LongOperationStatus; /* Byte 35 */
+ unsigned char ParityErrors; /* Byte 36 */
+ unsigned char SoftErrors; /* Byte 37 */
+ unsigned char HardErrors; /* Byte 38 */
+ unsigned char MiscellaneousErrors; /* Byte 39 */
+ unsigned char CommandTimeouts; /* Byte 40 */
+ unsigned char Retries; /* Byte 41 */
+ unsigned char Aborts; /* Byte 42 */
+ unsigned char PredictedFailuresDetected; /* Byte 43 */
+ unsigned int :32; /* Bytes 44-47 */
+ unsigned short :16; /* Bytes 48-49 */
+ unsigned short DeviceBlockSizeInBytes; /* Bytes 50-51 */
+ unsigned int OriginalDeviceSize; /* Bytes 52-55 */
+ unsigned int ConfigurableDeviceSize; /* Bytes 56-59 */
+ unsigned int :32; /* Bytes 60-63 */
+ unsigned char PhysicalDeviceName[16]; /* Bytes 64-79 */
+ unsigned char Reserved1[16]; /* Bytes 80-95 */
+ unsigned char Reserved2[32]; /* Bytes 96-127 */
+ unsigned char SCSI_InquiryData[36]; /* Bytes 128-163 */
+ unsigned char Reserved3[20]; /* Bytes 164-183 */
+ unsigned char Reserved4[8]; /* Bytes 184-191 */
+ u64 LastReadBlockNumber; /* Bytes 192-199 */
+ u64 LastWrittenBlockNumber; /* Bytes 200-207 */
+ u64 ConsistencyCheckBlockNumber; /* Bytes 208-215 */
+ u64 RebuildBlockNumber; /* Bytes 216-223 */
+ u64 MakingDataConsistentBlockNumber; /* Bytes 224-231 */
+ u64 DeviceInitializationBlockNumber; /* Bytes 232-239 */
+ u64 DataMigrationBlockNumber; /* Bytes 240-247 */
+ u64 PatrolOperationBlockNumber; /* Bytes 248-255 */
+ unsigned char Reserved5[256]; /* Bytes 256-511 */
+}
+DAC960_V2_PhysicalDeviceInfo_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Health Status Buffer structure.
+*/
+
+typedef struct DAC960_V2_HealthStatusBuffer
+{
+ unsigned int MicrosecondsFromControllerStartTime; /* Bytes 0-3 */
+ unsigned int MillisecondsFromControllerStartTime; /* Bytes 4-7 */
+ unsigned int SecondsFrom1January1970; /* Bytes 8-11 */
+ unsigned int :32; /* Bytes 12-15 */
+ unsigned int StatusChangeCounter; /* Bytes 16-19 */
+ unsigned int :32; /* Bytes 20-23 */
+ unsigned int DebugOutputMessageBufferIndex; /* Bytes 24-27 */
+ unsigned int CodedMessageBufferIndex; /* Bytes 28-31 */
+ unsigned int CurrentTimeTracePageNumber; /* Bytes 32-35 */
+ unsigned int CurrentProfilerPageNumber; /* Bytes 36-39 */
+ unsigned int NextEventSequenceNumber; /* Bytes 40-43 */
+ unsigned int :32; /* Bytes 44-47 */
+ unsigned char Reserved1[16]; /* Bytes 48-63 */
+ unsigned char Reserved2[64]; /* Bytes 64-127 */
+}
+DAC960_V2_HealthStatusBuffer_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Get Event reply structure.
+*/
+
+typedef struct DAC960_V2_Event
+{
+ unsigned int EventSequenceNumber; /* Bytes 0-3 */
+ unsigned int EventTime; /* Bytes 4-7 */
+ unsigned int EventCode; /* Bytes 8-11 */
+ unsigned char :8; /* Byte 12 */
+ unsigned char Channel; /* Byte 13 */
+ unsigned char TargetID; /* Byte 14 */
+ unsigned char LogicalUnit; /* Byte 15 */
+ unsigned int :32; /* Bytes 16-19 */
+ unsigned int EventSpecificParameter; /* Bytes 20-23 */
+ unsigned char RequestSenseData[40]; /* Bytes 24-63 */
+}
+DAC960_V2_Event_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Command Control Bits structure.
+*/
+
+typedef struct DAC960_V2_CommandControlBits
+{
+ bool ForceUnitAccess:1; /* Byte 0 Bit 0 */
+ bool DisablePageOut:1; /* Byte 0 Bit 1 */
+ bool rsvd1:1; /* Byte 0 Bit 2 */
+ bool AdditionalScatterGatherListMemory:1; /* Byte 0 Bit 3 */
+ bool DataTransferControllerToHost:1; /* Byte 0 Bit 4 */
+ bool rsvd2:1; /* Byte 0 Bit 5 */
+ bool NoAutoRequestSense:1; /* Byte 0 Bit 6 */
+ bool DisconnectProhibited:1; /* Byte 0 Bit 7 */
+}
+DAC960_V2_CommandControlBits_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Command Timeout structure.
+*/
+
+typedef struct DAC960_V2_CommandTimeout
+{
+ unsigned char TimeoutValue:6; /* Byte 0 Bits 0-5 */
+ enum {
+ DAC960_V2_TimeoutScale_Seconds = 0,
+ DAC960_V2_TimeoutScale_Minutes = 1,
+ DAC960_V2_TimeoutScale_Hours = 2,
+ DAC960_V2_TimeoutScale_Reserved = 3
+ } __attribute__ ((packed)) TimeoutScale:2; /* Byte 0 Bits 6-7 */
+}
+DAC960_V2_CommandTimeout_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Physical Device structure.
+*/
+
+typedef struct DAC960_V2_PhysicalDevice
+{
+ unsigned char LogicalUnit; /* Byte 0 */
+ unsigned char TargetID; /* Byte 1 */
+ unsigned char Channel:3; /* Byte 2 Bits 0-2 */
+ unsigned char Controller:5; /* Byte 2 Bits 3-7 */
+}
+__attribute__ ((packed))
+DAC960_V2_PhysicalDevice_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Logical Device structure.
+*/
+
+typedef struct DAC960_V2_LogicalDevice
+{
+ unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
+ unsigned char :3; /* Byte 2 Bits 0-2 */
+ unsigned char Controller:5; /* Byte 2 Bits 3-7 */
+}
+__attribute__ ((packed))
+DAC960_V2_LogicalDevice_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Operation Device type.
+*/
+
+typedef enum
+{
+ DAC960_V2_Physical_Device = 0x00,
+ DAC960_V2_RAID_Device = 0x01,
+ DAC960_V2_Physical_Channel = 0x02,
+ DAC960_V2_RAID_Channel = 0x03,
+ DAC960_V2_Physical_Controller = 0x04,
+ DAC960_V2_RAID_Controller = 0x05,
+ DAC960_V2_Configuration_Group = 0x10,
+ DAC960_V2_Enclosure = 0x11
+}
+__attribute__ ((packed))
+DAC960_V2_OperationDevice_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Translate Physical To Logical Device structure.
+*/
+
+typedef struct DAC960_V2_PhysicalToLogicalDevice
+{
+ unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
+ unsigned short :16; /* Bytes 2-3 */
+ unsigned char PreviousBootController; /* Byte 4 */
+ unsigned char PreviousBootChannel; /* Byte 5 */
+ unsigned char PreviousBootTargetID; /* Byte 6 */
+ unsigned char PreviousBootLogicalUnit; /* Byte 7 */
+}
+DAC960_V2_PhysicalToLogicalDevice_T;
+
+
+
+/*
+ Define the DAC960 V2 Firmware Scatter/Gather List Entry structure.
+*/
+
+typedef struct DAC960_V2_ScatterGatherSegment
+{
+ u64 SegmentDataPointer; /* Bytes 0-7 */
+ u64 SegmentByteCount; /* Bytes 8-15 */
+}
+DAC960_V2_ScatterGatherSegment_T;
+
+
+/*
+ Define the DAC960 V2 Firmware Data Transfer Memory Address structure.
+*/
+
+typedef union DAC960_V2_DataTransferMemoryAddress
+{
+ DAC960_V2_ScatterGatherSegment_T ScatterGatherSegments[2]; /* Bytes 0-31 */
+ struct {
+ unsigned short ScatterGatherList0Length; /* Bytes 0-1 */
+ unsigned short ScatterGatherList1Length; /* Bytes 2-3 */
+ unsigned short ScatterGatherList2Length; /* Bytes 4-5 */
+ unsigned short :16; /* Bytes 6-7 */
+ u64 ScatterGatherList0Address; /* Bytes 8-15 */
+ u64 ScatterGatherList1Address; /* Bytes 16-23 */
+ u64 ScatterGatherList2Address; /* Bytes 24-31 */
+ } ExtendedScatterGather;
+}
+DAC960_V2_DataTransferMemoryAddress_T;
+
+
+/*
+ Define the 64 Byte DAC960 V2 Firmware Command Mailbox structure.
+*/
+
+typedef union DAC960_V2_CommandMailbox
+{
+ unsigned int Words[16]; /* Words 0-15 */
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int :24; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ unsigned char Reserved[10]; /* Bytes 22-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } Common;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char CDBLength; /* Byte 21 */
+ unsigned char SCSI_CDB[10]; /* Bytes 22-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } SCSI_10;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char CDBLength; /* Byte 21 */
+ unsigned short :16; /* Bytes 22-23 */
+ u64 SCSI_CDB_BusAddress; /* Bytes 24-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } SCSI_255;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short :16; /* Bytes 16-17 */
+ unsigned char ControllerNumber; /* Byte 18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ unsigned char Reserved[10]; /* Bytes 22-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } ControllerInfo;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ unsigned char Reserved[10]; /* Bytes 22-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } LogicalDeviceInfo;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ unsigned char Reserved[10]; /* Bytes 22-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } PhysicalDeviceInfo;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short EventSequenceNumberHigh16; /* Bytes 16-17 */
+ unsigned char ControllerNumber; /* Byte 18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ unsigned short EventSequenceNumberLow16; /* Bytes 22-23 */
+ unsigned char Reserved[8]; /* Bytes 24-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } GetEvent;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ union {
+ DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
+ DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
+ };
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ DAC960_V2_DriveState_T State;
+ unsigned char Reserved[9]; /* Bytes 23-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } SetDeviceState;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ bool RestoreConsistency:1; /* Byte 22 Bit 0 */
+ bool InitializedAreaOnly:1; /* Byte 22 Bit 1 */
+ unsigned char :6; /* Byte 22 Bits 2-7 */
+ unsigned char Reserved[9]; /* Bytes 23-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } ConsistencyCheck;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ unsigned char FirstCommandMailboxSizeKB; /* Byte 4 */
+ unsigned char FirstStatusMailboxSizeKB; /* Byte 5 */
+ unsigned char SecondCommandMailboxSizeKB; /* Byte 6 */
+ unsigned char SecondStatusMailboxSizeKB; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int :24; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ unsigned char HealthStatusBufferSizeKB; /* Byte 22 */
+ unsigned char :8; /* Byte 23 */
+ u64 HealthStatusBufferBusAddress; /* Bytes 24-31 */
+ u64 FirstCommandMailboxBusAddress; /* Bytes 32-39 */
+ u64 FirstStatusMailboxBusAddress; /* Bytes 40-47 */
+ u64 SecondCommandMailboxBusAddress; /* Bytes 48-55 */
+ u64 SecondStatusMailboxBusAddress; /* Bytes 56-63 */
+ } SetMemoryMailbox;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ DAC960_V2_CommandOpcode_T opcode; /* Byte 2 */
+ DAC960_V2_CommandControlBits_T control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
+ DAC960_V2_CommandTimeout_T tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char IOCTL_Opcode; /* Byte 21 */
+ DAC960_V2_OperationDevice_T OperationDevice; /* Byte 22 */
+ unsigned char Reserved[9]; /* Bytes 23-31 */
+ DAC960_V2_DataTransferMemoryAddress_T dma_addr; /* Bytes 32-63 */
+ } DeviceOperation;
+}
+DAC960_V2_CommandMailbox_T;
+
+
+/*
+ Define the DAC960 Driver IOCTL requests.
+*/
+
+#define DAC960_IOCTL_GET_CONTROLLER_COUNT 0xDAC001
+#define DAC960_IOCTL_GET_CONTROLLER_INFO 0xDAC002
+#define DAC960_IOCTL_V1_EXECUTE_COMMAND 0xDAC003
+#define DAC960_IOCTL_V2_EXECUTE_COMMAND 0xDAC004
+#define DAC960_IOCTL_V2_GET_HEALTH_STATUS 0xDAC005
+
+
+/*
+ Define the DAC960_IOCTL_GET_CONTROLLER_INFO reply structure.
+*/
+
+typedef struct DAC960_ControllerInfo
+{
+ unsigned char ControllerNumber;
+ unsigned char FirmwareType;
+ unsigned char Channels;
+ unsigned char Targets;
+ unsigned char PCI_Bus;
+ unsigned char PCI_Device;
+ unsigned char PCI_Function;
+ unsigned char IRQ_Channel;
+ phys_addr_t PCI_Address;
+ unsigned char ModelName[20];
+ unsigned char FirmwareVersion[12];
+}
+DAC960_ControllerInfo_T;
+
+
+/*
+ Define the User Mode DAC960_IOCTL_V2_GET_HEALTH_STATUS request structure.
+*/
+
+typedef struct DAC960_V2_GetHealthStatus
+{
+ unsigned char ControllerNumber;
+ DAC960_V2_HealthStatusBuffer_T __user *HealthStatusBuffer;
+}
+DAC960_V2_GetHealthStatus_T;
+
+
+/*
+  Define the maximum Driver Queue Depth and Controller Queue Depth supported
+  by DAC960 V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_MaxDriverQueueDepth 511 /* one below the controller depth */
+#define DAC960_MaxControllerQueueDepth 512
+
+
+/*
+  Define the maximum number of Scatter/Gather Segments supported for any
+  DAC960 V1 and V2 Firmware controller.
+*/
+
+#define DAC960_V1_ScatterGatherLimit 32
+#define DAC960_V2_ScatterGatherLimit 128
+
+
+/*
+  Define the number of Command Mailboxes and Status Mailboxes used by the
+  DAC960 V1 and V2 Firmware Memory Mailbox Interface.
+*/
+
+#define DAC960_V1_CommandMailboxCount 256
+#define DAC960_V1_StatusMailboxCount 1024
+#define DAC960_V2_CommandMailboxCount 512
+#define DAC960_V2_StatusMailboxCount 512
+
+
+/*
+  Define the DAC960 Controller Monitoring Timer Interval.
+*/
+
+#define DAC960_MonitoringTimerInterval (10 * HZ)
+
+
+/*
+  Define the DAC960 Controller Secondary Monitoring Interval.
+*/
+
+#define DAC960_SecondaryMonitoringInterval (60 * HZ)
+
+
+/*
+  Define the DAC960 Controller Health Status Monitoring Interval.
+*/
+
+#define DAC960_HealthStatusMonitoringInterval (1 * HZ)
+
+
+/*
+  Define the DAC960 Controller Progress Reporting Interval.
+*/
+
+#define DAC960_ProgressReportingInterval (60 * HZ)
+
+/*
+  Define the DAC960 Controller fixed Block Size and Block Size Bits.
+*/
+
+#define DAC960_BlockSize 512
+#define DAC960_BlockSizeBits 9 /* log2(DAC960_BlockSize) */
+
+
+/*
+  Define the Controller Line Buffer, Progress Buffer, User Message, and
+  Initial Status Buffer sizes.
+*/
+
+#define DAC960_LineBufferSize 100
+
+#define DAC960_V2_DCDB_SIZE 16
+#define DAC960_V2_SENSE_BUFFERSIZE 14
+
+/*
+  Define the DAC960 Controller Firmware Types.
+*/
+
+typedef enum
+{
+ DAC960_V1_Controller = 1,
+ DAC960_V2_Controller = 2
+}
+DAC960_FirmwareType_T;
+
+
+/*
+  Define the DAC960 Controller Hardware Types.
+*/
+
+typedef enum
+{
+ DAC960_BA_Controller = 1, /* eXtremeRAID 2000 */
+ DAC960_LP_Controller = 2, /* AcceleRAID 352 */
+ DAC960_LA_Controller = 3, /* DAC1164P */
+ DAC960_PG_Controller = 4, /* DAC960PTL/PJ/PG */
+ DAC960_PD_Controller = 5, /* DAC960PU/PD/PL/P */
+ DAC960_P_Controller = 6, /* DAC960PU/PD/PL/P */
+ DAC960_GEM_Controller = 7, /* AcceleRAID 4/5/600 */
+}
+DAC960_HardwareType_T;
+
+/*
+  Per-PCI-ID driver data: board hardware/firmware type, the interrupt
+  handler to install, and the size of the register window to map.
+*/
+struct DAC960_privdata {
+ DAC960_HardwareType_T HardwareType;
+ DAC960_FirmwareType_T FirmwareType;
+ irq_handler_t InterruptHandler;
+ unsigned int MemoryWindowSize;
+};
+
+
+/*
+  Define the DAC960 V1 Firmware Controller Status Mailbox structure.
+  Layout is fixed by the controller firmware - do not reorder fields.
+*/
+
+typedef struct DAC960_V1_StatusMailbox
+{
+ unsigned char id; /* Byte 0 */
+ unsigned char rsvd:7; /* Byte 1 Bits 0-6 */
+ bool valid:1; /* Byte 1 Bit 7 */
+ unsigned short status; /* Bytes 2-3 */
+}
+DAC960_V1_StatusMailbox_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Controller Status Mailbox structure.
+  Layout is fixed by the controller firmware - do not reorder fields.
+*/
+
+typedef struct DAC960_V2_StatusMailbox
+{
+ unsigned short id; /* Bytes 0-1 */
+ unsigned char status; /* Byte 2 */
+ unsigned char sense_len; /* Byte 3 */
+ int residual; /* Bytes 4-7 */
+}
+DAC960_V2_StatusMailbox_T;
+
+/* Fixed command identifiers for driver-internal (non-SCSI) commands */
+#define DAC960_DirectCommandIdentifier 1
+#define DAC960_MonitoringIdentifier 2
+
+/*
+  Per-command driver state for V1 firmware: the mailbox image, the
+  completion status, and DMA-mapped DCDB and scatter/gather resources.
+*/
+typedef struct DAC960_V1_CommandBlock
+{
+ DAC960_V1_CommandMailbox_T mbox;
+ unsigned short status;
+ struct completion *Completion;
+ DAC960_V1_DCDB_T *DCDB;
+ dma_addr_t DCDB_dma;
+ DAC960_V1_ScatterGatherSegment_T *sgl;
+ dma_addr_t sgl_addr;
+} DAC960_V1_CommandBlock_T;
+
+/*
+  Per-command driver state for V2 firmware; additionally carries a
+  DMA-mapped sense buffer and the residual byte count from the status
+  mailbox.
+*/
+typedef struct DAC960_V2_CommandBlock
+{
+ DAC960_V2_CommandMailbox_T mbox;
+ unsigned char status;
+ unsigned char sense_len;
+ int residual;
+ struct completion *Completion;
+ DAC960_V2_ScatterGatherSegment_T *sgl;
+ dma_addr_t sgl_addr;
+ unsigned char *DCDB;
+ dma_addr_t DCDB_dma;
+ unsigned char *sense;
+ dma_addr_t sense_addr;
+} DAC960_V2_CommandBlock_T;
+
+/*
+  Define the DAC960 Driver Controller structure.
+
+  One instance per controller.  Generic state first, followed by a
+  union of firmware-version specific state (V1 vs V2); only the member
+  matching FirmwareType is valid.
+
+  NOTE(review): struct pci_pool is the legacy alias of struct dma_pool;
+  confirm which name the target kernel version provides.
+*/
+
+typedef struct DAC960_Controller
+{
+ void __iomem *BaseAddress;
+ void __iomem *MemoryMappedAddress;
+ DAC960_FirmwareType_T FirmwareType;
+ DAC960_HardwareType_T HardwareType;
+ phys_addr_t IO_Address;
+ phys_addr_t PCI_Address;
+ struct pci_dev *PCIDevice;
+ struct Scsi_Host *host;
+ unsigned char ControllerNumber;
+ unsigned char ControllerName[4];
+ unsigned char ModelName[20];
+ unsigned char FullModelName[28];
+ unsigned char FirmwareVersion[12];
+ unsigned char Bus;
+ unsigned char Device;
+ unsigned char Function;
+ unsigned char IRQ_Channel;
+ unsigned char MemorySize;
+ unsigned char LogicalDriveCount;
+ unsigned char PhysicalChannelCount;
+ unsigned char PhysicalChannelMax;
+ unsigned char LogicalChannelCount;
+ unsigned char LogicalChannelMax;
+ unsigned short ControllerQueueDepth;
+ unsigned short ControllerScatterGatherLimit;
+ u64 BounceBufferLimit;
+ struct dma_loaf DmaPages;
+ /* jiffies timestamps driving the periodic monitoring work */
+ unsigned long PrimaryMonitoringTime;
+ unsigned long SecondaryMonitoringTime;
+ unsigned long ShutdownMonitoringTimer;
+ unsigned long LastProgressReportTime;
+ unsigned long LastCurrentStatusTime;
+ bool DriveSpinUpMessageDisplayed;
+ bool SuppressEnclosureMessages;
+ struct workqueue_struct *work_q;
+ struct delayed_work monitor_work;
+ struct pci_pool *ScatterGatherPool;
+ spinlock_t queue_lock;
+ char work_q_name[20];
+ /* hardware-type specific hooks, set up at probe time */
+ int (*ReadControllerConfiguration)(struct DAC960_Controller *);
+ void (*DisableInterrupts)(void __iomem *);
+ void (*Reset)(void __iomem *);
+ union {
+ struct {
+ unsigned int LogicalBlockSize;
+ unsigned char GeometryTranslationHeads;
+ unsigned char GeometryTranslationSectors;
+ unsigned char PendingRebuildFlag;
+ unsigned char BusWidth;
+ unsigned short StripeSize;
+ unsigned short SegmentSize;
+ unsigned short NewEventLogSequenceNumber;
+ unsigned short OldEventLogSequenceNumber;
+ bool DualModeMemoryMailboxInterface;
+ bool BackgroundInitializationStatusSupported;
+ bool SAFTE_EnclosureManagementEnabled;
+ /* flags telling the monitor which data still needs refreshing */
+ bool NeedLogicalDeviceInfo;
+ bool NeedErrorTableInformation;
+ bool NeedRebuildProgress;
+ bool NeedConsistencyCheckProgress;
+ bool NeedBackgroundInitializationStatus;
+ bool RebuildProgressFirst;
+ bool RebuildFlagPending;
+ bool RebuildStatusPending;
+ struct pci_pool *DCDBPool;
+
+ void (*QueueCommand)(struct DAC960_Controller *,
+ DAC960_V1_CommandBlock_T *);
+ void (*WriteCommandMailbox)(DAC960_V1_CommandMailbox_T *,
+ DAC960_V1_CommandMailbox_T *);
+ void (*MailboxNewCommand)(void __iomem *);
+
+ /* memory mailbox ring (command side) */
+ dma_addr_t FirstCommandMailboxDMA;
+ DAC960_V1_CommandMailbox_T *FirstCommandMailbox;
+ DAC960_V1_CommandMailbox_T *LastCommandMailbox;
+ DAC960_V1_CommandMailbox_T *NextCommandMailbox;
+ DAC960_V1_CommandMailbox_T *PreviousCommandMailbox1;
+ DAC960_V1_CommandMailbox_T *PreviousCommandMailbox2;
+
+ /* memory mailbox ring (status side) */
+ dma_addr_t FirstStatusMailboxDMA;
+ DAC960_V1_StatusMailbox_T *FirstStatusMailbox;
+ DAC960_V1_StatusMailbox_T *LastStatusMailbox;
+ DAC960_V1_StatusMailbox_T *NextStatusMailbox;
+
+ DAC960_V1_CommandBlock_T DirectCommandBlock;
+ DAC960_V1_CommandBlock_T MonitoringCommandBlock;
+ struct mutex dcmd_mutex; /* serializes DirectCommandBlock use */
+
+ DAC960_V1_Enquiry_T Enquiry;
+ DAC960_V1_Enquiry_T *NewEnquiry;
+ dma_addr_t NewEnquiryDMA;
+
+ DAC960_V1_ErrorTable_T ErrorTable;
+ DAC960_V1_ErrorTable_T *NewErrorTable;
+ dma_addr_t NewErrorTableDMA;
+
+ DAC960_V1_EventLogEntry_T *EventLogEntry;
+ dma_addr_t EventLogEntryDMA;
+
+ DAC960_V1_RebuildProgress_T *RebuildProgress;
+ dma_addr_t RebuildProgressDMA;
+ unsigned short LastRebuildStatus;
+
+ DAC960_V1_LogicalDeviceInfoArray_T *LogicalDeviceInfo;
+ dma_addr_t LogicalDeviceInfoDMA;
+
+ DAC960_V1_BackgroundInitializationStatus_T
+ *BackgroundInitializationStatus;
+ dma_addr_t BackgroundInitializationStatusDMA;
+ DAC960_V1_BackgroundInitializationStatus_T
+ LastBackgroundInitializationStatus;
+
+ DAC960_V1_DeviceState_T *NewDeviceState;
+ dma_addr_t NewDeviceStateDMA;
+ struct mutex dma_mutex; /* serializes shared DMA buffer use */
+ } V1;
+ struct {
+ unsigned int StatusChangeCounter;
+ unsigned int NextEventSequenceNumber;
+ /* Monitor flags */
+ bool NeedControllerInformation;
+ struct pci_pool *RequestSensePool;
+ struct pci_pool *DCDBPool;
+
+ void (*QueueCommand)(struct DAC960_Controller *,
+ DAC960_V2_CommandBlock_T *);
+ void (*WriteCommandMailbox)(DAC960_V2_CommandMailbox_T *,
+ DAC960_V2_CommandMailbox_T *);
+ void (*MailboxNewCommand)(void __iomem *);
+
+ /* memory mailbox ring (command side) */
+ dma_addr_t FirstCommandMailboxDMA;
+ DAC960_V2_CommandMailbox_T *FirstCommandMailbox;
+ DAC960_V2_CommandMailbox_T *LastCommandMailbox;
+ DAC960_V2_CommandMailbox_T *NextCommandMailbox;
+ DAC960_V2_CommandMailbox_T *PreviousCommandMailbox1;
+ DAC960_V2_CommandMailbox_T *PreviousCommandMailbox2;
+
+ /* memory mailbox ring (status side) */
+ dma_addr_t FirstStatusMailboxDMA;
+ DAC960_V2_StatusMailbox_T *FirstStatusMailbox;
+ DAC960_V2_StatusMailbox_T *LastStatusMailbox;
+ DAC960_V2_StatusMailbox_T *NextStatusMailbox;
+
+ DAC960_V2_CommandBlock_T DirectCommandBlock;
+ DAC960_V2_CommandBlock_T MonitoringCommandBlock;
+ struct mutex dcmd_mutex; /* serializes DirectCommandBlock use */
+
+ dma_addr_t HealthStatusBufferDMA;
+ DAC960_V2_HealthStatusBuffer_T *HealthStatusBuffer;
+
+ DAC960_V2_ControllerInfo_T ControllerInformation;
+ DAC960_V2_ControllerInfo_T *NewControllerInformation;
+ dma_addr_t NewControllerInformationDMA;
+ struct mutex cinfo_mutex; /* protects controller info updates */
+
+ DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInformation;
+ dma_addr_t NewLogicalDeviceInformationDMA;
+
+ DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInformation;
+ dma_addr_t NewPhysicalDeviceInformationDMA;
+
+ DAC960_V2_Event_T *Event;
+ dma_addr_t EventDMA;
+
+ DAC960_V2_PhysicalToLogicalDevice_T *PhysicalToLogicalDevice;
+ dma_addr_t PhysicalToLogicalDeviceDMA;
+ } V2;
+ } FW;
+} DAC960_Controller_T;
+
+
+/*
+  Simplify access to Firmware Version Dependent Data Structure Components
+  and Functions.
+
+  NOTE(review): "V1" and "V2" are dangerously short macro names that
+  will rewrite any other identifier spelled V1/V2 in this translation
+  unit - kept for brevity, but handle with care.
+*/
+
+#define V1 FW.V1
+#define V2 FW.V2
+#define DAC960_ReadControllerConfiguration(Controller) \
+ (Controller->ReadControllerConfiguration)(Controller)
+#define DAC960_DisableInterrupts(Controller) \
+ (Controller->DisableInterrupts)(Controller->BaseAddress)
+
+/*
+ * dma_addr_writeql writes a dma_addr_t to a 64-bit PCI address-space
+ * register.  The controller accepts the register being written as two
+ * 32-bit values: the low dword at 'write_address' and the high dword
+ * at 'write_address + 4'.
+ *
+ * The address is split with explicit shifts rather than by
+ * type-punning through a union: a union's 32-bit word order follows
+ * host endianness, which would write the dwords in the wrong order on
+ * a big-endian kernel.  On little-endian machines the result is
+ * identical.  On 32-bit dma_addr_t kernels the high dword is simply 0.
+ */
+static inline
+void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
+{
+ u64 wq = addr;
+
+ writel((u32)wq, write_address);
+ writel((u32)(wq >> 32), write_address + 4);
+}
+
+/*
+  Define the DAC960 GEM Series Controller Interface Register Offsets.
+
+  Note: the error status read-set/clear offsets (0x224/0x228) alias the
+  outbound door bell read-set/clear offsets - on GEM boards the error
+  status bits live in the same register.
+ */
+
+#define DAC960_GEM_RegisterWindowSize 0x600
+
+typedef enum
+{
+ DAC960_GEM_InboundDoorBellRegisterReadSetOffset = 0x214,
+ DAC960_GEM_InboundDoorBellRegisterClearOffset = 0x218,
+ DAC960_GEM_OutboundDoorBellRegisterReadSetOffset = 0x224,
+ DAC960_GEM_OutboundDoorBellRegisterClearOffset = 0x228,
+ DAC960_GEM_InterruptStatusRegisterOffset = 0x208,
+ DAC960_GEM_InterruptMaskRegisterReadSetOffset = 0x22C,
+ DAC960_GEM_InterruptMaskRegisterClearOffset = 0x230,
+ DAC960_GEM_CommandMailboxBusAddressOffset = 0x510,
+ DAC960_GEM_CommandStatusOffset = 0x518,
+ DAC960_GEM_ErrorStatusRegisterReadSetOffset = 0x224,
+ DAC960_GEM_ErrorStatusRegisterClearOffset = 0x228,
+}
+DAC960_GEM_RegisterOffsets_T;
+
+/*
+  Define the structure of the DAC960 GEM Series Inbound Door Bell
+  Register.  The active bits sit in the top byte of the 32-bit word.
+ */
+
+typedef union DAC960_GEM_InboundDoorBellRegister
+{
+ unsigned int All;
+ struct {
+ unsigned int :24;
+ bool HardwareMailboxNewCommand:1;
+ bool AcknowledgeHardwareMailboxStatus:1;
+ bool GenerateInterrupt:1;
+ bool ControllerReset:1;
+ bool MemoryMailboxNewCommand:1;
+ unsigned int :3;
+ } Write;
+ struct {
+ unsigned int :24;
+ bool HardwareMailboxFull:1;
+ bool InitializationInProgress:1;
+ unsigned int :6;
+ } Read;
+}
+DAC960_GEM_InboundDoorBellRegister_T;
+
+/*
+  Define the structure of the DAC960 GEM Series Outbound Door Bell Register.
+ */
+typedef union DAC960_GEM_OutboundDoorBellRegister
+{
+ unsigned int All;
+ struct {
+ unsigned int :24;
+ bool AcknowledgeHardwareMailboxInterrupt:1;
+ bool AcknowledgeMemoryMailboxInterrupt:1;
+ unsigned int :6;
+ } Write;
+ struct {
+ unsigned int :24;
+ bool HardwareMailboxStatusAvailable:1;
+ bool MemoryMailboxStatusAvailable:1;
+ unsigned int :6;
+ } Read;
+}
+DAC960_GEM_OutboundDoorBellRegister_T;
+
+/*
+  Define the structure of the DAC960 GEM Series Interrupt Mask Register.
+ */
+typedef union DAC960_GEM_InterruptMaskRegister
+{
+ unsigned int All;
+ struct {
+ unsigned int :16;
+ unsigned int :8;
+ unsigned int HardwareMailboxInterrupt:1;
+ unsigned int MemoryMailboxInterrupt:1;
+ unsigned int :6;
+ } Bits;
+}
+DAC960_GEM_InterruptMaskRegister_T;
+
+/*
+  Define the structure of the DAC960 GEM Series Error Status Register.
+ */
+
+typedef union DAC960_GEM_ErrorStatusRegister
+{
+ unsigned int All;
+ struct {
+ unsigned int :24;
+ unsigned int :5;
+ bool ErrorStatusPending:1;
+ unsigned int :2;
+ } Bits;
+}
+DAC960_GEM_ErrorStatusRegister_T;
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 GEM Series Controller Interface Registers.
+
+  GEM door bell registers use a read-set/clear pair: writing a bit to
+  the "ReadSet" offset sets it, writing it to the "Clear" offset clears
+  it, so each helper builds a word with exactly the bit(s) it wants to
+  flip and writes it to the appropriate offset.
+*/
+
+/* Tell the controller a command has been written to the hardware mailbox. */
+static inline
+void DAC960_GEM_HardwareMailboxNewCommand(void __iomem *base)
+{
+ DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+ writel(InboundDoorBellRegister.All,
+        base + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
+}
+
+/* Acknowledge (clear) the hardware mailbox status indication. */
+static inline
+void DAC960_GEM_AcknowledgeHardwareMailboxStatus(void __iomem *base)
+{
+ DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+ writel(InboundDoorBellRegister.All,
+        base + DAC960_GEM_InboundDoorBellRegisterClearOffset);
+}
+
+static inline
+void DAC960_GEM_GenerateInterrupt(void __iomem *base)
+{
+ DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.GenerateInterrupt = true;
+ writel(InboundDoorBellRegister.All,
+        base + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
+}
+
+static inline
+void DAC960_GEM_ControllerReset(void __iomem *base)
+{
+ DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.ControllerReset = true;
+ writel(InboundDoorBellRegister.All,
+        base + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
+}
+
+/* Tell the controller a command has been written to the memory mailbox. */
+static inline
+void DAC960_GEM_MemoryMailboxNewCommand(void __iomem *base)
+{
+ DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+ writel(InboundDoorBellRegister.All,
+        base + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
+}
+
+/* Poll: true while the hardware mailbox still holds an unconsumed command. */
+static inline
+bool DAC960_GEM_HardwareMailboxFullP(void __iomem *base)
+{
+ DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+	readl(base + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
+ return InboundDoorBellRegister.Read.HardwareMailboxFull;
+}
+
+/* Poll: true while firmware initialization is still running. */
+static inline
+bool DAC960_GEM_InitializationInProgressP(void __iomem *base)
+{
+ DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+	readl(base +
+	      DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
+ return InboundDoorBellRegister.Read.InitializationInProgress;
+}
+
+static inline
+void DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(void __iomem *base)
+{
+ DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ writel(OutboundDoorBellRegister.All,
+        base + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
+}
+
+static inline
+void DAC960_GEM_AcknowledgeMemoryMailboxInterrupt(void __iomem *base)
+{
+ DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writel(OutboundDoorBellRegister.All,
+        base + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
+}
+
+/*
+  Acknowledge both the hardware and the memory mailbox interrupt with a
+  single write to the outbound door bell clear register.
+*/
+static inline
+void DAC960_GEM_AcknowledgeInterrupt(void __iomem *base)
+{
+ DAC960_GEM_OutboundDoorBellRegister_T odb;
+
+ odb.All = 0;
+ odb.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ odb.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writel(odb.All, base + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
+}
+
+/* Poll: true when hardware mailbox status has been posted. */
+static inline
+bool DAC960_GEM_HardwareMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+	readl(base + DAC960_GEM_OutboundDoorBellRegisterReadSetOffset);
+ return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+/* Poll: true when memory mailbox status has been posted. */
+static inline
+bool DAC960_GEM_MemoryMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+	readl(base + DAC960_GEM_OutboundDoorBellRegisterReadSetOffset);
+ return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+/*
+  Interrupts are enabled by CLEARING the mask bits (write to the Clear
+  offset) and disabled by SETTING them (write to the ReadSet offset).
+*/
+static inline
+void DAC960_GEM_EnableInterrupts(void __iomem *base)
+{
+ DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0;
+ InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true;
+ InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true;
+ writel(InterruptMaskRegister.All,
+        base + DAC960_GEM_InterruptMaskRegisterClearOffset);
+}
+
+static inline
+void DAC960_GEM_DisableInterrupts(void __iomem *base)
+{
+ DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0;
+ InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true;
+ InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true;
+ writel(InterruptMaskRegister.All,
+        base + DAC960_GEM_InterruptMaskRegisterReadSetOffset);
+}
+
+/* Interrupts are enabled iff both mask bits are clear. */
+static inline
+bool DAC960_GEM_InterruptsEnabledP(void __iomem *base)
+{
+ DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All =
+	readl(base + DAC960_GEM_InterruptMaskRegisterReadSetOffset);
+ return !(InterruptMaskRegister.Bits.HardwareMailboxInterrupt ||
+	  InterruptMaskRegister.Bits.MemoryMailboxInterrupt);
+}
+
+/*
+  Copy a command into a memory mailbox slot.  Words 1..n are written
+  first; Word 0 (which contains the command identifier the controller
+  polls for) is written last, with barriers, so the controller never
+  sees a partially written mailbox.
+*/
+static inline
+void DAC960_GEM_WriteCommandMailbox(DAC960_V2_CommandMailbox_T *mem_mbox,
+				    DAC960_V2_CommandMailbox_T *mbox)
+{
+ memcpy(&mem_mbox->Words[1], &mbox->Words[1],
+        sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
+ wmb();
+ mem_mbox->Words[0] = mbox->Words[0];
+ mb();
+}
+
+static inline
+void DAC960_GEM_WriteHardwareMailbox(void __iomem *base,
+				     dma_addr_t CommandMailboxDMA)
+{
+ dma_addr_writeql(CommandMailboxDMA,
+		  base + DAC960_GEM_CommandMailboxBusAddressOffset);
+}
+
+static inline unsigned short
+DAC960_GEM_ReadCommandIdentifier(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CommandStatusOffset);
+}
+
+static inline unsigned char
+DAC960_GEM_ReadCommandStatus(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CommandStatusOffset + 2);
+}
+
+/*
+  Check for a pending firmware error status; if present, return the
+  error code and its two parameter bytes and clear the condition.
+*/
+static inline bool
+DAC960_GEM_ReadErrorStatus(void __iomem *base,
+			   unsigned char *ErrorStatus,
+			   unsigned char *Parameter0,
+			   unsigned char *Parameter1)
+{
+ DAC960_GEM_ErrorStatusRegister_T ErrorStatusRegister;
+ ErrorStatusRegister.All =
+	readl(base + DAC960_GEM_ErrorStatusRegisterReadSetOffset);
+ if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
+ ErrorStatusRegister.Bits.ErrorStatusPending = false;
+ *ErrorStatus = ErrorStatusRegister.All;
+ *Parameter0 =
+	readb(base + DAC960_GEM_CommandMailboxBusAddressOffset + 0);
+ *Parameter1 =
+	readb(base + DAC960_GEM_CommandMailboxBusAddressOffset + 1);
+ writel(0x03000000, base +
+        DAC960_GEM_ErrorStatusRegisterClearOffset);
+ return true;
+}
+
+/*
+  Define the DAC960 BA Series Controller Interface Register Offsets.
+  Unlike GEM, the BA door bell and mask registers are byte-wide.
+*/
+
+#define DAC960_BA_RegisterWindowSize 0x80
+
+typedef enum
+{
+ DAC960_BA_InterruptStatusRegisterOffset = 0x30,
+ DAC960_BA_InterruptMaskRegisterOffset = 0x34,
+ DAC960_BA_CommandMailboxBusAddressOffset = 0x50,
+ DAC960_BA_CommandStatusOffset = 0x58,
+ DAC960_BA_InboundDoorBellRegisterOffset = 0x60,
+ DAC960_BA_OutboundDoorBellRegisterOffset = 0x61,
+ DAC960_BA_ErrorStatusRegisterOffset = 0x63
+}
+DAC960_BA_RegisterOffsets_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Inbound Door Bell Register.
+
+  Note the Read view uses inverted-sense bits (HardwareMailboxEmpty,
+  InitializationNotInProgress); the predicate helpers below negate them.
+*/
+
+typedef union DAC960_BA_InboundDoorBellRegister
+{
+ unsigned char All;
+ struct {
+ bool HardwareMailboxNewCommand:1; /* Bit 0 */
+ bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
+ bool GenerateInterrupt:1; /* Bit 2 */
+ bool ControllerReset:1; /* Bit 3 */
+ bool MemoryMailboxNewCommand:1; /* Bit 4 */
+ unsigned char :3; /* Bits 5-7 */
+ } Write;
+ struct {
+ bool HardwareMailboxEmpty:1; /* Bit 0 */
+ bool InitializationNotInProgress:1; /* Bit 1 */
+ unsigned char :6; /* Bits 2-7 */
+ } Read;
+}
+DAC960_BA_InboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Outbound Door Bell Register.
+*/
+
+typedef union DAC960_BA_OutboundDoorBellRegister
+{
+ unsigned char All;
+ struct {
+ bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
+ bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
+ unsigned char :6; /* Bits 2-7 */
+ } Write;
+ struct {
+ bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
+ bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
+ unsigned char :6; /* Bits 2-7 */
+ } Read;
+}
+DAC960_BA_OutboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Interrupt Mask Register.
+  A set bit DISABLES the corresponding interrupt source.
+*/
+
+typedef union DAC960_BA_InterruptMaskRegister
+{
+ unsigned char All;
+ struct {
+ unsigned int :2; /* Bits 0-1 */
+ bool DisableInterrupts:1; /* Bit 2 */
+ bool DisableInterruptsI2O:1; /* Bit 3 */
+ unsigned int :4; /* Bits 4-7 */
+ } Bits;
+}
+DAC960_BA_InterruptMaskRegister_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Error Status Register.
+*/
+
+typedef union DAC960_BA_ErrorStatusRegister
+{
+ unsigned char All;
+ struct {
+ unsigned int :2; /* Bits 0-1 */
+ bool ErrorStatusPending:1; /* Bit 2 */
+ unsigned int :5; /* Bits 3-7 */
+ } Bits;
+}
+DAC960_BA_ErrorStatusRegister_T;
+
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 BA Series Controller Interface Registers.  All accesses are
+  byte-wide (writeb/readb), matching the BA register layout.
+*/
+
+/* Tell the controller a command has been written to the hardware mailbox. */
+static inline
+void DAC960_BA_HardwareMailboxNewCommand(void __iomem *base)
+{
+ DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_AcknowledgeHardwareMailboxStatus(void __iomem *base)
+{
+ DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_GenerateInterrupt(void __iomem *base)
+{
+ DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.GenerateInterrupt = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_ControllerReset(void __iomem *base)
+{
+ DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.ControllerReset = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_MemoryMailboxNewCommand(void __iomem *base)
+{
+ DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+/* Poll: BA reports "empty", so full is its negation. */
+static inline
+bool DAC960_BA_HardwareMailboxFullP(void __iomem *base)
+{
+ DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+	readb(base + DAC960_BA_InboundDoorBellRegisterOffset);
+ return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
+}
+
+/* Poll: BA reports "not in progress", so in-progress is its negation. */
+static inline
+bool DAC960_BA_InitializationInProgressP(void __iomem *base)
+{
+ DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+	readb(base + DAC960_BA_InboundDoorBellRegisterOffset);
+ return !InboundDoorBellRegister.Read.InitializationNotInProgress;
+}
+
+static inline
+void DAC960_BA_AcknowledgeHardwareMailboxInterrupt(void __iomem *base)
+{
+ DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+        base + DAC960_BA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_AcknowledgeMemoryMailboxInterrupt(void __iomem *base)
+{
+ DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+        base + DAC960_BA_OutboundDoorBellRegisterOffset);
+}
+
+/*
+  Acknowledge both the hardware and the memory mailbox interrupt with a
+  single byte write to the outbound door bell register.
+*/
+static inline
+void DAC960_BA_AcknowledgeInterrupt(void __iomem *base)
+{
+ DAC960_BA_OutboundDoorBellRegister_T odb;
+
+ odb.All = 0;
+ odb.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ odb.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writeb(odb.All, base + DAC960_BA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_BA_HardwareMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+	readb(base + DAC960_BA_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+bool DAC960_BA_MemoryMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+	readb(base + DAC960_BA_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+/*
+  The mask starts from 0xFF (all sources masked) and only the bits the
+  driver manages are adjusted.  Enable clears DisableInterrupts but
+  deliberately leaves the I2O interrupt disabled.
+*/
+static inline
+void DAC960_BA_EnableInterrupts(void __iomem *base)
+{
+ DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0xFF;
+ InterruptMaskRegister.Bits.DisableInterrupts = false;
+ InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
+ writeb(InterruptMaskRegister.All,
+        base + DAC960_BA_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_BA_DisableInterrupts(void __iomem *base)
+{
+ DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0xFF;
+ InterruptMaskRegister.Bits.DisableInterrupts = true;
+ InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
+ writeb(InterruptMaskRegister.All,
+        base + DAC960_BA_InterruptMaskRegisterOffset);
+}
+
+static inline
+bool DAC960_BA_InterruptsEnabledP(void __iomem *base)
+{
+ DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All =
+	readb(base + DAC960_BA_InterruptMaskRegisterOffset);
+ return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
+/*
+  Copy a command into a memory mailbox slot; Word 0 is written last
+  (with barriers) so the controller never sees a partial mailbox.
+*/
+static inline
+void DAC960_BA_WriteCommandMailbox(DAC960_V2_CommandMailbox_T *mem_mbox,
+				   DAC960_V2_CommandMailbox_T *mbox)
+{
+ memcpy(&mem_mbox->Words[1], &mbox->Words[1],
+        sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
+ wmb();
+ mem_mbox->Words[0] = mbox->Words[0];
+ mb();
+}
+
+
+static inline
+void DAC960_BA_WriteHardwareMailbox(void __iomem *base,
+				    dma_addr_t CommandMailboxDMA)
+{
+ dma_addr_writeql(CommandMailboxDMA,
+		  base + DAC960_BA_CommandMailboxBusAddressOffset);
+}
+
+static inline unsigned short
+DAC960_BA_ReadCommandIdentifier(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CommandStatusOffset);
+}
+
+static inline unsigned char
+DAC960_BA_ReadCommandStatus(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CommandStatusOffset + 2);
+}
+
+/*
+  Check for a pending firmware error status; if present, return the
+  error code and parameter bytes and clear the condition.
+*/
+static inline bool
+DAC960_BA_ReadErrorStatus(void __iomem *base,
+			  unsigned char *ErrorStatus,
+			  unsigned char *Parameter0,
+			  unsigned char *Parameter1)
+{
+ DAC960_BA_ErrorStatusRegister_T ErrorStatusRegister;
+ ErrorStatusRegister.All =
+	readb(base + DAC960_BA_ErrorStatusRegisterOffset);
+ if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
+ ErrorStatusRegister.Bits.ErrorStatusPending = false;
+ *ErrorStatus = ErrorStatusRegister.All;
+ *Parameter0 = readb(base + DAC960_BA_CommandMailboxBusAddressOffset + 0);
+ *Parameter1 = readb(base + DAC960_BA_CommandMailboxBusAddressOffset + 1);
+ writeb(0xFF, base + DAC960_BA_ErrorStatusRegisterOffset);
+ return true;
+}
+
+
+/*
+  Define the DAC960 LP Series Controller Interface Register Offsets.
+  Same register set as BA, at different offsets, and without the I2O
+  interrupt mask bit.
+*/
+
+#define DAC960_LP_RegisterWindowSize 0x80
+
+typedef enum
+{
+ DAC960_LP_CommandMailboxBusAddressOffset = 0x10,
+ DAC960_LP_CommandStatusOffset = 0x18,
+ DAC960_LP_InboundDoorBellRegisterOffset = 0x20,
+ DAC960_LP_OutboundDoorBellRegisterOffset = 0x2C,
+ DAC960_LP_ErrorStatusRegisterOffset = 0x2E,
+ DAC960_LP_InterruptStatusRegisterOffset = 0x30,
+ DAC960_LP_InterruptMaskRegisterOffset = 0x34,
+}
+DAC960_LP_RegisterOffsets_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Inbound Door Bell Register.
+  Unlike BA, the LP Read bits use positive sense (Full / InProgress).
+*/
+
+typedef union DAC960_LP_InboundDoorBellRegister
+{
+ unsigned char All;
+ struct {
+ bool HardwareMailboxNewCommand:1; /* Bit 0 */
+ bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
+ bool GenerateInterrupt:1; /* Bit 2 */
+ bool ControllerReset:1; /* Bit 3 */
+ bool MemoryMailboxNewCommand:1; /* Bit 4 */
+ unsigned char :3; /* Bits 5-7 */
+ } Write;
+ struct {
+ bool HardwareMailboxFull:1; /* Bit 0 */
+ bool InitializationInProgress:1; /* Bit 1 */
+ unsigned char :6; /* Bits 2-7 */
+ } Read;
+}
+DAC960_LP_InboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Outbound Door Bell Register.
+*/
+
+typedef union DAC960_LP_OutboundDoorBellRegister
+{
+ unsigned char All;
+ struct {
+ bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
+ bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
+ unsigned char :6; /* Bits 2-7 */
+ } Write;
+ struct {
+ bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
+ bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
+ unsigned char :6; /* Bits 2-7 */
+ } Read;
+}
+DAC960_LP_OutboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Interrupt Mask Register.
+  A set DisableInterrupts bit masks the controller interrupt.
+*/
+
+typedef union DAC960_LP_InterruptMaskRegister
+{
+ unsigned char All;
+ struct {
+ unsigned int :2; /* Bits 0-1 */
+ bool DisableInterrupts:1; /* Bit 2 */
+ unsigned int :5; /* Bits 3-7 */
+ } Bits;
+}
+DAC960_LP_InterruptMaskRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Error Status Register.
+*/
+
+typedef union DAC960_LP_ErrorStatusRegister
+{
+ unsigned char All;
+ struct {
+ unsigned int :2; /* Bits 0-1 */
+ bool ErrorStatusPending:1; /* Bit 2 */
+ unsigned int :5; /* Bits 3-7 */
+ } Bits;
+}
+DAC960_LP_ErrorStatusRegister_T;
+
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 LP Series Controller Interface Registers.  All accesses are
+  byte-wide (writeb/readb).
+*/
+
+/* Tell the controller a command has been written to the hardware mailbox. */
+static inline
+void DAC960_LP_HardwareMailboxNewCommand(void __iomem *base)
+{
+ DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_AcknowledgeHardwareMailboxStatus(void __iomem *base)
+{
+ DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_GenerateInterrupt(void __iomem *base)
+{
+ DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.GenerateInterrupt = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_ControllerReset(void __iomem *base)
+{
+ DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.ControllerReset = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_MemoryMailboxNewCommand(void __iomem *base)
+{
+ DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+ writeb(InboundDoorBellRegister.All,
+        base + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+/* Poll: true while the hardware mailbox still holds an unconsumed command. */
+static inline
+bool DAC960_LP_HardwareMailboxFullP(void __iomem *base)
+{
+ DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+	readb(base + DAC960_LP_InboundDoorBellRegisterOffset);
+ return InboundDoorBellRegister.Read.HardwareMailboxFull;
+}
+
+/* Poll: true while firmware initialization is still running. */
+static inline
+bool DAC960_LP_InitializationInProgressP(void __iomem *base)
+{
+ DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+	readb(base + DAC960_LP_InboundDoorBellRegisterOffset);
+ return InboundDoorBellRegister.Read.InitializationInProgress;
+}
+
+static inline
+void DAC960_LP_AcknowledgeHardwareMailboxInterrupt(void __iomem *base)
+{
+ DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+ base + DAC960_LP_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_AcknowledgeMemoryMailboxInterrupt(void __iomem *base)
+{
+ DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+ base + DAC960_LP_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_AcknowledgeInterrupt(void __iomem *base)
+{
+ DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+ base + DAC960_LP_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_LP_HardwareMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+ readb(base + DAC960_LP_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+bool DAC960_LP_MemoryMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+ readb(base + DAC960_LP_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+static inline
+void DAC960_LP_EnableInterrupts(void __iomem *base)
+{
+ DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0xFF;
+ InterruptMaskRegister.Bits.DisableInterrupts = false;
+ writeb(InterruptMaskRegister.All,
+ base + DAC960_LP_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_LP_DisableInterrupts(void __iomem *base)
+{
+ DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0xFF;
+ InterruptMaskRegister.Bits.DisableInterrupts = true;
+ writeb(InterruptMaskRegister.All,
+ base + DAC960_LP_InterruptMaskRegisterOffset);
+}
+
+static inline
+bool DAC960_LP_InterruptsEnabledP(void __iomem *base)
+{
+ DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All =
+ readb(base + DAC960_LP_InterruptMaskRegisterOffset);
+ return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
static inline
void DAC960_LP_WriteCommandMailbox(DAC960_V2_CommandMailbox_T *mem_mbox,
				   DAC960_V2_CommandMailbox_T *mbox)
{
	/*
	 * Copy the mailbox payload (Words[1..]) first, then publish the
	 * command by storing Words[0] last; the barriers keep that order,
	 * presumably because the controller detects a new command via
	 * Words[0] -- do not reorder these stores.
	 */
	memcpy(&mem_mbox->Words[1], &mbox->Words[1],
	       sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
	wmb();
	mem_mbox->Words[0] = mbox->Words[0];
	mb();
}
+
static inline
void DAC960_LP_WriteHardwareMailbox(void __iomem *base,
				    dma_addr_t CommandMailboxDMA)
{
	/* Hand the controller the 64-bit bus address of the command mailbox. */
	dma_addr_writeql(CommandMailboxDMA,
			 base +
			 DAC960_LP_CommandMailboxBusAddressOffset);
}
+
static inline unsigned short
DAC960_LP_ReadCommandIdentifier(void __iomem *base)
{
	/* Identifier of the completed command: low 16 bits of the status word. */
	return readw(base + DAC960_LP_CommandStatusOffset);
}
+
static inline unsigned char
DAC960_LP_ReadCommandStatus(void __iomem *base)
{
	/*
	 * Status byte lives at offset +2; the 16-bit read is deliberately
	 * truncated to the low 8 bits by the return type.
	 */
	return readw(base + DAC960_LP_CommandStatusOffset + 2);
}
+
static inline bool
DAC960_LP_ReadErrorStatus(void __iomem *base,
			  unsigned char *ErrorStatus,
			  unsigned char *Parameter0,
			  unsigned char *Parameter1)
{
	/*
	 * If the controller has a pending error, return true with the error
	 * code (pending bit stripped) and the two parameter bytes, then
	 * acknowledge the error by rewriting the error status register.
	 * Returns false when no error is pending.
	 */
	DAC960_LP_ErrorStatusRegister_T ErrorStatusRegister;
	ErrorStatusRegister.All =
		readb(base + DAC960_LP_ErrorStatusRegisterOffset);
	if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
	ErrorStatusRegister.Bits.ErrorStatusPending = false;
	*ErrorStatus = ErrorStatusRegister.All;
	/* Parameter bytes are reported in the mailbox address register. */
	*Parameter0 =
		readb(base + DAC960_LP_CommandMailboxBusAddressOffset + 0);
	*Parameter1 =
		readb(base + DAC960_LP_CommandMailboxBusAddressOffset + 1);
	writeb(0xFF, base + DAC960_LP_ErrorStatusRegisterOffset);
	return true;
}
+
+
/*
  Define the DAC960 LA Series Controller Interface Register Offsets.
  Values are byte offsets from the mapped controller register window.
*/

#define DAC960_LA_RegisterWindowSize 0x80

typedef enum
{
	DAC960_LA_InterruptMaskRegisterOffset = 0x34,
	DAC960_LA_CommandOpcodeRegisterOffset = 0x50,
	DAC960_LA_CommandIdentifierRegisterOffset = 0x51,
	DAC960_LA_MailboxRegister2Offset = 0x52,
	DAC960_LA_MailboxRegister3Offset = 0x53,
	DAC960_LA_MailboxRegister4Offset = 0x54,
	DAC960_LA_MailboxRegister5Offset = 0x55,
	DAC960_LA_MailboxRegister6Offset = 0x56,
	DAC960_LA_MailboxRegister7Offset = 0x57,
	DAC960_LA_MailboxRegister8Offset = 0x58,
	DAC960_LA_MailboxRegister9Offset = 0x59,
	DAC960_LA_MailboxRegister10Offset = 0x5A,
	DAC960_LA_MailboxRegister11Offset = 0x5B,
	DAC960_LA_MailboxRegister12Offset = 0x5C,
	DAC960_LA_StatusCommandIdentifierRegOffset = 0x5D,
	DAC960_LA_StatusRegisterOffset = 0x5E,
	DAC960_LA_InboundDoorBellRegisterOffset = 0x60,
	DAC960_LA_OutboundDoorBellRegisterOffset = 0x61,
	DAC960_LA_ErrorStatusRegisterOffset = 0x63
}
DAC960_LA_RegisterOffsets_T;
+
+
/*
  Define the structure of the DAC960 LA Series Inbound Door Bell Register.

  NOTE(review): unlike LP/PG, the LA read view reports *Empty* and
  *NotInProgress*; callers invert these.  Bitfield order assumes an
  LSB-first (little-endian ABI) layout.
*/

typedef union DAC960_LA_InboundDoorBellRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		bool HardwareMailboxNewCommand:1;		/* Bit 0 */
		bool AcknowledgeHardwareMailboxStatus:1;	/* Bit 1 */
		bool GenerateInterrupt:1;			/* Bit 2 */
		bool ControllerReset:1;				/* Bit 3 */
		bool MemoryMailboxNewCommand:1;			/* Bit 4 */
		unsigned char :3;				/* Bits 5-7 */
	} Write;
	struct {
		bool HardwareMailboxEmpty:1;			/* Bit 0 */
		bool InitializationNotInProgress:1;		/* Bit 1 */
		unsigned char :6;				/* Bits 2-7 */
	} Read;
}
DAC960_LA_InboundDoorBellRegister_T;
+
+
/*
  Define the structure of the DAC960 LA Series Outbound Door Bell Register.

  NOTE(review): bitfield order assumes an LSB-first (little-endian ABI)
  layout, as in the original DAC960 driver.
*/

typedef union DAC960_LA_OutboundDoorBellRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		bool AcknowledgeHardwareMailboxInterrupt:1;	/* Bit 0 */
		bool AcknowledgeMemoryMailboxInterrupt:1;	/* Bit 1 */
		unsigned char :6;				/* Bits 2-7 */
	} Write;
	struct {
		bool HardwareMailboxStatusAvailable:1;		/* Bit 0 */
		bool MemoryMailboxStatusAvailable:1;		/* Bit 1 */
		unsigned char :6;				/* Bits 2-7 */
	} Read;
}
DAC960_LA_OutboundDoorBellRegister_T;
+
+
/*
  Define the structure of the DAC960 LA Series Interrupt Mask Register.

  NOTE(review): bitfield order assumes an LSB-first (little-endian ABI)
  layout, as in the original DAC960 driver.
*/

typedef union DAC960_LA_InterruptMaskRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		unsigned char :2;			/* Bits 0-1 */
		bool DisableInterrupts:1;		/* Bit 2 */
		unsigned char :5;			/* Bits 3-7 */
	} Bits;
}
DAC960_LA_InterruptMaskRegister_T;
+
+
/*
  Define the structure of the DAC960 LA Series Error Status Register.

  NOTE(review): bitfield order assumes an LSB-first (little-endian ABI)
  layout, as in the original DAC960 driver.
*/

typedef union DAC960_LA_ErrorStatusRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		unsigned int :2;			/* Bits 0-1 */
		bool ErrorStatusPending:1;		/* Bit 2 */
		unsigned int :5;			/* Bits 3-7 */
	} Bits;
}
DAC960_LA_ErrorStatusRegister_T;
+
+
+/*
+ Define inline functions to provide an abstraction for reading and writing the
+ DAC960 LA Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_LA_HardwareMailboxNewCommand(void __iomem *base)
+{
+ DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_AcknowledgeHardwareMailboxStatus(void __iomem *base)
+{
+ DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_GenerateInterrupt(void __iomem *base)
+{
+ DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.GenerateInterrupt = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_ControllerReset(void __iomem *base)
+{
+ DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.ControllerReset = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_MemoryMailboxNewCommand(void __iomem *base)
+{
+ DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_LA_HardwareMailboxFullP(void __iomem *base)
+{
+ DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+ readb(base + DAC960_LA_InboundDoorBellRegisterOffset);
+ return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
+}
+
+static inline
+bool DAC960_LA_InitializationInProgressP(void __iomem *base)
+{
+ DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+ readb(base + DAC960_LA_InboundDoorBellRegisterOffset);
+ return !InboundDoorBellRegister.Read.InitializationNotInProgress;
+}
+
+static inline
+void DAC960_LA_AcknowledgeHardwareMailboxInterrupt(void __iomem *base)
+{
+ DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+ base + DAC960_LA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_AcknowledgeMemoryMailboxInterrupt(void __iomem *base)
+{
+ DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+ base + DAC960_LA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_AcknowledgeInterrupt(void __iomem *base)
+{
+ DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+ base + DAC960_LA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_LA_HardwareMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+ readb(base + DAC960_LA_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+bool DAC960_LA_MemoryMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+ readb(base + DAC960_LA_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+static inline
+void DAC960_LA_EnableInterrupts(void __iomem *base)
+{
+ DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0xFF;
+ InterruptMaskRegister.Bits.DisableInterrupts = false;
+ writeb(InterruptMaskRegister.All,
+ base + DAC960_LA_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_LA_DisableInterrupts(void __iomem *base)
+{
+ DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0xFF;
+ InterruptMaskRegister.Bits.DisableInterrupts = true;
+ writeb(InterruptMaskRegister.All,
+ base + DAC960_LA_InterruptMaskRegisterOffset);
+}
+
+static inline
+bool DAC960_LA_InterruptsEnabledP(void __iomem *base)
+{
+ DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All =
+ readb(base + DAC960_LA_InterruptMaskRegisterOffset);
+ return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
static inline
void DAC960_LA_WriteCommandMailbox(DAC960_V1_CommandMailbox_T *mem_mbox,
				   DAC960_V1_CommandMailbox_T *mbox)
{
	/*
	 * Fill the mailbox payload (Words[1..3]) first, then publish the
	 * command by storing Words[0] last; the barriers keep that order,
	 * presumably because the controller detects a new command via
	 * Words[0] -- do not reorder these stores.
	 */
	mem_mbox->Words[1] = mbox->Words[1];
	mem_mbox->Words[2] = mbox->Words[2];
	mem_mbox->Words[3] = mbox->Words[3];
	wmb();
	mem_mbox->Words[0] = mbox->Words[0];
	mb();
}
+
static inline
void DAC960_LA_WriteHardwareMailbox(void __iomem *base,
				    DAC960_V1_CommandMailbox_T *mbox)
{
	/*
	 * Copy the 13-byte V1 mailbox into the byte-wide mailbox registers;
	 * each 32-bit write covers four consecutive registers (0x50-0x5C).
	 */
	writel(mbox->Words[0],
	       base + DAC960_LA_CommandOpcodeRegisterOffset);
	writel(mbox->Words[1],
	       base + DAC960_LA_MailboxRegister4Offset);
	writel(mbox->Words[2],
	       base + DAC960_LA_MailboxRegister8Offset);
	writeb(mbox->Bytes[12],
	       base + DAC960_LA_MailboxRegister12Offset);
}
+
static inline unsigned char
DAC960_LA_ReadStatusCommandIdentifier(void __iomem *base)
{
	/* Identifier of the command whose status is currently posted. */
	return readb(base
		     + DAC960_LA_StatusCommandIdentifierRegOffset);
}
+
static inline unsigned short
DAC960_LA_ReadStatusRegister(void __iomem *base)
{
	/* 16-bit completion status of the posted command. */
	return readw(base + DAC960_LA_StatusRegisterOffset);
}
+
static inline bool
DAC960_LA_ReadErrorStatus(void __iomem *base,
			  unsigned char *ErrorStatus,
			  unsigned char *Parameter0,
			  unsigned char *Parameter1)
{
	/*
	 * If the controller has a pending error, return true with the error
	 * code (pending bit stripped) and the two parameter bytes, then
	 * acknowledge the error by writing 0xFF to the error status
	 * register.  Returns false when no error is pending.
	 */
	DAC960_LA_ErrorStatusRegister_T ErrorStatusRegister;
	ErrorStatusRegister.All =
		readb(base + DAC960_LA_ErrorStatusRegisterOffset);
	if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
	ErrorStatusRegister.Bits.ErrorStatusPending = false;
	*ErrorStatus = ErrorStatusRegister.All;
	/* Parameter bytes are reported in the first two mailbox registers. */
	*Parameter0 =
		readb(base + DAC960_LA_CommandOpcodeRegisterOffset);
	*Parameter1 =
		readb(base + DAC960_LA_CommandIdentifierRegisterOffset);
	writeb(0xFF, base + DAC960_LA_ErrorStatusRegisterOffset);
	return true;
}
+
/*
  Define the DAC960 PG Series Controller Interface Register Offsets.
  Values are byte offsets from the mapped controller register window.
*/

#define DAC960_PG_RegisterWindowSize 0x2000

typedef enum
{
	DAC960_PG_InboundDoorBellRegisterOffset = 0x0020,
	DAC960_PG_OutboundDoorBellRegisterOffset = 0x002C,
	DAC960_PG_InterruptMaskRegisterOffset = 0x0034,
	DAC960_PG_CommandOpcodeRegisterOffset = 0x1000,
	DAC960_PG_CommandIdentifierRegisterOffset = 0x1001,
	DAC960_PG_MailboxRegister2Offset = 0x1002,
	DAC960_PG_MailboxRegister3Offset = 0x1003,
	DAC960_PG_MailboxRegister4Offset = 0x1004,
	DAC960_PG_MailboxRegister5Offset = 0x1005,
	DAC960_PG_MailboxRegister6Offset = 0x1006,
	DAC960_PG_MailboxRegister7Offset = 0x1007,
	DAC960_PG_MailboxRegister8Offset = 0x1008,
	DAC960_PG_MailboxRegister9Offset = 0x1009,
	DAC960_PG_MailboxRegister10Offset = 0x100A,
	DAC960_PG_MailboxRegister11Offset = 0x100B,
	DAC960_PG_MailboxRegister12Offset = 0x100C,
	DAC960_PG_StatusCommandIdentifierRegOffset = 0x1018,
	DAC960_PG_StatusRegisterOffset = 0x101A,
	DAC960_PG_ErrorStatusRegisterOffset = 0x103F
}
DAC960_PG_RegisterOffsets_T;
+
+
/*
  Define the structure of the DAC960 PG Series Inbound Door Bell Register.

  NOTE(review): PG doorbell registers are 32 bits wide (readl/writel);
  bitfield order assumes an LSB-first (little-endian ABI) layout.
*/

typedef union DAC960_PG_InboundDoorBellRegister
{
	unsigned int All;	/* raw 32-bit image for readl()/writel() */
	struct {
		bool HardwareMailboxNewCommand:1;		/* Bit 0 */
		bool AcknowledgeHardwareMailboxStatus:1;	/* Bit 1 */
		bool GenerateInterrupt:1;			/* Bit 2 */
		bool ControllerReset:1;				/* Bit 3 */
		bool MemoryMailboxNewCommand:1;			/* Bit 4 */
		unsigned int :27;				/* Bits 5-31 */
	} Write;
	struct {
		bool HardwareMailboxFull:1;			/* Bit 0 */
		bool InitializationInProgress:1;		/* Bit 1 */
		unsigned int :30;				/* Bits 2-31 */
	} Read;
}
DAC960_PG_InboundDoorBellRegister_T;
+
+
/*
  Define the structure of the DAC960 PG Series Outbound Door Bell Register.

  NOTE(review): 32-bit register (readl/writel); bitfield order assumes
  an LSB-first (little-endian ABI) layout.
*/

typedef union DAC960_PG_OutboundDoorBellRegister
{
	unsigned int All;	/* raw 32-bit image for readl()/writel() */
	struct {
		bool AcknowledgeHardwareMailboxInterrupt:1;	/* Bit 0 */
		bool AcknowledgeMemoryMailboxInterrupt:1;	/* Bit 1 */
		unsigned int :30;				/* Bits 2-31 */
	} Write;
	struct {
		bool HardwareMailboxStatusAvailable:1;		/* Bit 0 */
		bool MemoryMailboxStatusAvailable:1;		/* Bit 1 */
		unsigned int :30;				/* Bits 2-31 */
	} Read;
}
DAC960_PG_OutboundDoorBellRegister_T;
+
+
/*
  Define the structure of the DAC960 PG Series Interrupt Mask Register.

  NOTE(review): 32-bit register; the message-unit mask fields are always
  written all-ones by this driver (see the Enable/Disable helpers).
  Bitfield order assumes an LSB-first (little-endian ABI) layout.
*/

typedef union DAC960_PG_InterruptMaskRegister
{
	unsigned int All;	/* raw 32-bit image for readl()/writel() */
	struct {
		unsigned int MessageUnitInterruptMask1:2;	/* Bits 0-1 */
		bool DisableInterrupts:1;			/* Bit 2 */
		unsigned int MessageUnitInterruptMask2:5;	/* Bits 3-7 */
		unsigned int Reserved0:24;			/* Bits 8-31 */
	} Bits;
}
DAC960_PG_InterruptMaskRegister_T;
+
+
/*
  Define the structure of the DAC960 PG Series Error Status Register.

  NOTE(review): bitfield order assumes an LSB-first (little-endian ABI)
  layout, as in the original DAC960 driver.
*/

typedef union DAC960_PG_ErrorStatusRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		unsigned int :2;			/* Bits 0-1 */
		bool ErrorStatusPending:1;		/* Bit 2 */
		unsigned int :5;			/* Bits 3-7 */
	} Bits;
}
DAC960_PG_ErrorStatusRegister_T;
+
+
+/*
+ Define inline functions to provide an abstraction for reading and writing the
+ DAC960 PG Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_PG_HardwareMailboxNewCommand(void __iomem *base)
+{
+ DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+ writel(InboundDoorBellRegister.All,
+ base + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_AcknowledgeHardwareMailboxStatus(void __iomem *base)
+{
+ DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+ writel(InboundDoorBellRegister.All,
+ base + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_GenerateInterrupt(void __iomem *base)
+{
+ DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.GenerateInterrupt = true;
+ writel(InboundDoorBellRegister.All,
+ base + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_ControllerReset(void __iomem *base)
+{
+ DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.ControllerReset = true;
+ writel(InboundDoorBellRegister.All,
+ base + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_MemoryMailboxNewCommand(void __iomem *base)
+{
+ DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+ writel(InboundDoorBellRegister.All,
+ base + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_PG_HardwareMailboxFullP(void __iomem *base)
+{
+ DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+ readl(base + DAC960_PG_InboundDoorBellRegisterOffset);
+ return InboundDoorBellRegister.Read.HardwareMailboxFull;
+}
+
+static inline
+bool DAC960_PG_InitializationInProgressP(void __iomem *base)
+{
+ DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+ readl(base + DAC960_PG_InboundDoorBellRegisterOffset);
+ return InboundDoorBellRegister.Read.InitializationInProgress;
+}
+
+static inline
+void DAC960_PG_AcknowledgeHardwareMailboxInterrupt(void __iomem *base)
+{
+ DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ writel(OutboundDoorBellRegister.All,
+ base + DAC960_PG_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_AcknowledgeMemoryMailboxInterrupt(void __iomem *base)
+{
+ DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writel(OutboundDoorBellRegister.All,
+ base + DAC960_PG_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_AcknowledgeInterrupt(void __iomem *base)
+{
+ DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+ OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+ writel(OutboundDoorBellRegister.All,
+ base + DAC960_PG_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_PG_HardwareMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+ readl(base + DAC960_PG_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+bool DAC960_PG_MemoryMailboxStatusAvailableP(void __iomem *base)
+{
+ DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+ readl(base + DAC960_PG_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+static inline
+void DAC960_PG_EnableInterrupts(void __iomem *base)
+{
+ DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0;
+ InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
+ InterruptMaskRegister.Bits.DisableInterrupts = false;
+ InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
+ writel(InterruptMaskRegister.All,
+ base + DAC960_PG_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_PG_DisableInterrupts(void __iomem *base)
+{
+ DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All = 0;
+ InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
+ InterruptMaskRegister.Bits.DisableInterrupts = true;
+ InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
+ writel(InterruptMaskRegister.All,
+ base + DAC960_PG_InterruptMaskRegisterOffset);
+}
+
+static inline
+bool DAC960_PG_InterruptsEnabledP(void __iomem *base)
+{
+ DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
+ InterruptMaskRegister.All =
+ readl(base + DAC960_PG_InterruptMaskRegisterOffset);
+ return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
static inline
void DAC960_PG_WriteCommandMailbox(DAC960_V1_CommandMailbox_T *mem_mbox,
				   DAC960_V1_CommandMailbox_T *mbox)
{
	/*
	 * Fill the mailbox payload (Words[1..3]) first, then publish the
	 * command by storing Words[0] last; the barriers keep that order,
	 * presumably because the controller detects a new command via
	 * Words[0] -- do not reorder these stores.
	 */
	mem_mbox->Words[1] = mbox->Words[1];
	mem_mbox->Words[2] = mbox->Words[2];
	mem_mbox->Words[3] = mbox->Words[3];
	wmb();
	mem_mbox->Words[0] = mbox->Words[0];
	mb();
}
+
static inline
void DAC960_PG_WriteHardwareMailbox(void __iomem *base,
				    DAC960_V1_CommandMailbox_T *mbox)
{
	/*
	 * Copy the 13-byte V1 mailbox into the byte-wide mailbox registers;
	 * each 32-bit write covers four consecutive registers (0x1000-0x100C).
	 */
	writel(mbox->Words[0],
	       base + DAC960_PG_CommandOpcodeRegisterOffset);
	writel(mbox->Words[1],
	       base + DAC960_PG_MailboxRegister4Offset);
	writel(mbox->Words[2],
	       base + DAC960_PG_MailboxRegister8Offset);
	writeb(mbox->Bytes[12],
	       base + DAC960_PG_MailboxRegister12Offset);
}
+
static inline unsigned char
DAC960_PG_ReadStatusCommandIdentifier(void __iomem *base)
{
	/* Identifier of the command whose status is currently posted. */
	return readb(base
		     + DAC960_PG_StatusCommandIdentifierRegOffset);
}
+
static inline unsigned short
DAC960_PG_ReadStatusRegister(void __iomem *base)
{
	/* 16-bit completion status of the posted command. */
	return readw(base + DAC960_PG_StatusRegisterOffset);
}
+
static inline bool
DAC960_PG_ReadErrorStatus(void __iomem *base,
			  unsigned char *ErrorStatus,
			  unsigned char *Parameter0,
			  unsigned char *Parameter1)
{
	/*
	 * If the controller has a pending error, return true with the error
	 * code (pending bit stripped) and the two parameter bytes, then
	 * acknowledge the error by writing 0 to the error status register
	 * (PG clears with 0, unlike LA which writes 0xFF).  Returns false
	 * when no error is pending.
	 */
	DAC960_PG_ErrorStatusRegister_T ErrorStatusRegister;
	ErrorStatusRegister.All =
		readb(base + DAC960_PG_ErrorStatusRegisterOffset);
	if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
	ErrorStatusRegister.Bits.ErrorStatusPending = false;
	*ErrorStatus = ErrorStatusRegister.All;
	/* Parameter bytes are reported in the first two mailbox registers. */
	*Parameter0 = readb(base + DAC960_PG_CommandOpcodeRegisterOffset);
	*Parameter1 = readb(base + DAC960_PG_CommandIdentifierRegisterOffset);
	writeb(0, base + DAC960_PG_ErrorStatusRegisterOffset);
	return true;
}
+
/*
  Define the DAC960 PD Series Controller Interface Register Offsets.
  Values are byte offsets from the mapped controller register window.
*/

#define DAC960_PD_RegisterWindowSize 0x80

typedef enum
{
	DAC960_PD_CommandOpcodeRegisterOffset = 0x00,
	DAC960_PD_CommandIdentifierRegisterOffset = 0x01,
	DAC960_PD_MailboxRegister2Offset = 0x02,
	DAC960_PD_MailboxRegister3Offset = 0x03,
	DAC960_PD_MailboxRegister4Offset = 0x04,
	DAC960_PD_MailboxRegister5Offset = 0x05,
	DAC960_PD_MailboxRegister6Offset = 0x06,
	DAC960_PD_MailboxRegister7Offset = 0x07,
	DAC960_PD_MailboxRegister8Offset = 0x08,
	DAC960_PD_MailboxRegister9Offset = 0x09,
	DAC960_PD_MailboxRegister10Offset = 0x0A,
	DAC960_PD_MailboxRegister11Offset = 0x0B,
	DAC960_PD_MailboxRegister12Offset = 0x0C,
	DAC960_PD_StatusCommandIdentifierRegOffset = 0x0D,
	DAC960_PD_StatusRegisterOffset = 0x0E,
	DAC960_PD_ErrorStatusRegisterOffset = 0x3F,
	DAC960_PD_InboundDoorBellRegisterOffset = 0x40,
	DAC960_PD_OutboundDoorBellRegisterOffset = 0x41,
	DAC960_PD_InterruptEnableRegisterOffset = 0x43
}
DAC960_PD_RegisterOffsets_T;
+
+
/*
  Define the structure of the DAC960 PD Series Inbound Door Bell Register.

  NOTE(review): PD has a single (hardware) mailbox -- no memory mailbox
  bits.  Bitfield order assumes an LSB-first (little-endian ABI) layout.
*/

typedef union DAC960_PD_InboundDoorBellRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		bool NewCommand:1;			/* Bit 0 */
		bool AcknowledgeStatus:1;		/* Bit 1 */
		bool GenerateInterrupt:1;		/* Bit 2 */
		bool ControllerReset:1;			/* Bit 3 */
		unsigned char :4;			/* Bits 4-7 */
	} Write;
	struct {
		bool MailboxFull:1;			/* Bit 0 */
		bool InitializationInProgress:1;	/* Bit 1 */
		unsigned char :6;			/* Bits 2-7 */
	} Read;
}
DAC960_PD_InboundDoorBellRegister_T;
+
+
/*
  Define the structure of the DAC960 PD Series Outbound Door Bell Register.

  NOTE(review): bitfield order assumes an LSB-first (little-endian ABI)
  layout, as in the original DAC960 driver.
*/

typedef union DAC960_PD_OutboundDoorBellRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		bool AcknowledgeInterrupt:1;		/* Bit 0 */
		unsigned char :7;			/* Bits 1-7 */
	} Write;
	struct {
		bool StatusAvailable:1;			/* Bit 0 */
		unsigned char :7;			/* Bits 1-7 */
	} Read;
}
DAC960_PD_OutboundDoorBellRegister_T;
+
+
/*
  Define the structure of the DAC960 PD Series Interrupt Enable Register.

  NOTE(review): PD uses a positive enable bit, unlike the other series'
  disable-style mask registers.  Bitfield order assumes an LSB-first
  (little-endian ABI) layout.
*/

typedef union DAC960_PD_InterruptEnableRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		bool EnableInterrupts:1;		/* Bit 0 */
		unsigned char :7;			/* Bits 1-7 */
	} Bits;
}
DAC960_PD_InterruptEnableRegister_T;
+
+
/*
  Define the structure of the DAC960 PD Series Error Status Register.

  NOTE(review): bitfield order assumes an LSB-first (little-endian ABI)
  layout, as in the original DAC960 driver.
*/

typedef union DAC960_PD_ErrorStatusRegister
{
	unsigned char All;	/* raw byte image for readb()/writeb() */
	struct {
		unsigned int :2;			/* Bits 0-1 */
		bool ErrorStatusPending:1;		/* Bit 2 */
		unsigned int :5;			/* Bits 3-7 */
	} Bits;
}
DAC960_PD_ErrorStatusRegister_T;
+
+
+/*
+ Define inline functions to provide an abstraction for reading and writing the
+ DAC960 PD Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_PD_NewCommand(void __iomem *base)
+{
+ DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.NewCommand = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PD_AcknowledgeStatus(void __iomem *base)
+{
+ DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.AcknowledgeStatus = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PD_GenerateInterrupt(void __iomem *base)
+{
+ DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.GenerateInterrupt = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PD_ControllerReset(void __iomem *base)
+{
+ DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All = 0;
+ InboundDoorBellRegister.Write.ControllerReset = true;
+ writeb(InboundDoorBellRegister.All,
+ base + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_PD_MailboxFullP(void __iomem *base)
+{
+ DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+ readb(base + DAC960_PD_InboundDoorBellRegisterOffset);
+ return InboundDoorBellRegister.Read.MailboxFull;
+}
+
+static inline
+bool DAC960_PD_InitializationInProgressP(void __iomem *base)
+{
+ DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+ InboundDoorBellRegister.All =
+ readb(base + DAC960_PD_InboundDoorBellRegisterOffset);
+ return InboundDoorBellRegister.Read.InitializationInProgress;
+}
+
+static inline
+void DAC960_PD_AcknowledgeInterrupt(void __iomem *base)
+{
+ DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All = 0;
+ OutboundDoorBellRegister.Write.AcknowledgeInterrupt = true;
+ writeb(OutboundDoorBellRegister.All,
+ base + DAC960_PD_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+bool DAC960_PD_StatusAvailableP(void __iomem *base)
+{
+ DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+ OutboundDoorBellRegister.All =
+ readb(base + DAC960_PD_OutboundDoorBellRegisterOffset);
+ return OutboundDoorBellRegister.Read.StatusAvailable;
+}
+
+static inline
+void DAC960_PD_EnableInterrupts(void __iomem *base)
+{
+ DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
+ InterruptEnableRegister.All = 0;
+ InterruptEnableRegister.Bits.EnableInterrupts = true;
+ writeb(InterruptEnableRegister.All,
+ base + DAC960_PD_InterruptEnableRegisterOffset);
+}
+
+static inline
+void DAC960_PD_DisableInterrupts(void __iomem *base)
+{
+ DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
+ InterruptEnableRegister.All = 0;
+ InterruptEnableRegister.Bits.EnableInterrupts = false;
+ writeb(InterruptEnableRegister.All,
+ base + DAC960_PD_InterruptEnableRegisterOffset);
+}
+
+static inline
+bool DAC960_PD_InterruptsEnabledP(void __iomem *base)
+{
+ DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
+ InterruptEnableRegister.All =
+ readb(base + DAC960_PD_InterruptEnableRegisterOffset);
+ return InterruptEnableRegister.Bits.EnableInterrupts;
+}
+
static inline
void DAC960_PD_WriteCommandMailbox(void __iomem *base,
				   DAC960_V1_CommandMailbox_T *mbox)
{
	/*
	 * PD has no memory mailbox; write the 13-byte V1 mailbox straight
	 * into the byte-wide mailbox registers.  Each 32-bit write covers
	 * four consecutive registers (0x00-0x0C).
	 */
	writel(mbox->Words[0],
	       base + DAC960_PD_CommandOpcodeRegisterOffset);
	writel(mbox->Words[1],
	       base + DAC960_PD_MailboxRegister4Offset);
	writel(mbox->Words[2],
	       base + DAC960_PD_MailboxRegister8Offset);
	writeb(mbox->Bytes[12],
	       base + DAC960_PD_MailboxRegister12Offset);
}
+
+static inline unsigned char
+DAC960_PD_ReadStatusCommandIdentifier(void __iomem *base)
+{
+ return readb(base
+ + DAC960_PD_StatusCommandIdentifierRegOffset);
+}
+
+static inline unsigned short
+DAC960_PD_ReadStatusRegister(void __iomem *base)
+{
+ return readw(base + DAC960_PD_StatusRegisterOffset);
+}
+
/*
 * Check for a pending controller error during initialization.
 *
 * Returns false if no error is pending.  Otherwise stores the error
 * status (with the "pending" flag stripped) in *ErrorStatus, reads the
 * two error parameters from the opcode and command-identifier register
 * offsets into *Parameter0 / *Parameter1, clears the error condition by
 * writing 0 back to the error-status register, and returns true.
 *
 * The read/extract/clear order here is deliberate: the parameters are
 * read before the error-status register is cleared.
 */
static inline bool
DAC960_PD_ReadErrorStatus(void __iomem *base,
			  unsigned char *ErrorStatus,
			  unsigned char *Parameter0,
			  unsigned char *Parameter1)
{
	DAC960_PD_ErrorStatusRegister_T ErrorStatusRegister;

	ErrorStatusRegister.All =
		readb(base + DAC960_PD_ErrorStatusRegisterOffset);
	if (!ErrorStatusRegister.Bits.ErrorStatusPending)
		return false;
	/* Report the status value without the pending flag set. */
	ErrorStatusRegister.Bits.ErrorStatusPending = false;
	*ErrorStatus = ErrorStatusRegister.All;
	*Parameter0 = readb(base + DAC960_PD_CommandOpcodeRegisterOffset);
	*Parameter1 = readb(base + DAC960_PD_CommandIdentifierRegisterOffset);
	/* Acknowledge and clear the error condition. */
	writeb(0, base + DAC960_PD_ErrorStatusRegisterOffset);
	return true;
}
+
/*
 * Convert a DAC960P-layout enquiry buffer in place to the PD layout:
 * the 64 bytes at offset 36 are relocated to offset 132, and the
 * vacated region at offsets 36..131 is zero-filled.
 */
static inline void DAC960_P_To_PD_TranslateEnquiry(void *Enquiry)
{
	unsigned char *p = Enquiry;

	memcpy(p + 132, p + 36, 64);
	memset(p + 36, 0, 96);
}
+
/*
 * Repack a DAC960P-layout device-state buffer in place into the PD
 * layout: byte 3 moves to offset 2, bytes 5..6 shift down to 4..5,
 * and bytes 8..11 shift down to 6..9.  The moves use memmove because
 * source and destination regions overlap.
 */
static inline void DAC960_P_To_PD_TranslateDeviceState(void *DeviceState)
{
	unsigned char *state = DeviceState;

	state[2] = state[3];
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}
+
/*
 * Rewrite a read/write command mailbox from the PD register layout to
 * the older DAC960P layout.  This is the inverse of
 * DAC960_P_To_PD_TranslateReadWriteCommand().
 *
 * NOTE(review): the exact bit positions are dictated by the legacy
 * P/PD Type5 mailbox formats — verify against the
 * DAC960_V1_CommandMailbox_T bitfield definitions before changing.
 */
static inline
void DAC960_PD_To_P_TranslateReadWriteCommand(DAC960_V1_CommandBlock_T *cmd_blk)
{
	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
	/* Capture the logical drive number before byte 3 is rewritten. */
	int ldev_num = mbox->Type5.LD.LogicalDriveNumber;

	/* Keep the low 3 bits; fold byte 7 into the top bits of byte 3. */
	mbox->Bytes[3] &= 0x7;
	mbox->Bytes[3] |= mbox->Bytes[7] << 6;
	mbox->Bytes[7] = ldev_num;
}
+
/*
 * Rewrite a read/write command mailbox from the DAC960P layout to the
 * PD register layout.  This is the inverse of
 * DAC960_PD_To_P_TranslateReadWriteCommand(): the logical drive number
 * stored in byte 7 moves into bits 3+ of byte 3, and the top two bits
 * of byte 3 move into byte 7.
 *
 * NOTE(review): statement order matters — byte 7 must be saved before
 * it is overwritten, and byte 3's high bits read before they are
 * masked off.
 */
static inline
void DAC960_P_To_PD_TranslateReadWriteCommand(DAC960_V1_CommandBlock_T *cmd_blk)
{
	DAC960_V1_CommandMailbox_T *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->Bytes[7];

	mbox->Bytes[7] = mbox->Bytes[3] >> 6;
	mbox->Bytes[3] &= 0x7;
	mbox->Bytes[3] |= ldev_num << 3;
}
+
+static unsigned short mylex_translate_ldev(DAC960_Controller_T *c,
+ struct scsi_device *sdev)
+{
+ unsigned short ldev_num;
+
+ ldev_num = sdev->id +
+ (sdev->channel - c->PhysicalChannelCount) * c->host->max_id;
+
+ return ldev_num;
+}
+
+/*
+ Define prototypes for the forward referenced DAC960 Driver Internal Functions.
+*/
+
+static irqreturn_t DAC960_BA_InterruptHandler(int, void *);
+static irqreturn_t DAC960_LP_InterruptHandler(int, void *);
+static irqreturn_t DAC960_LA_InterruptHandler(int, void *);
+static irqreturn_t DAC960_PG_InterruptHandler(int, void *);
+static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
+static irqreturn_t DAC960_P_InterruptHandler(int, void *);
+static void DAC960_MonitoringWork(struct work_struct *work);
+
+#endif /* _MYLEX_H */
This patch is a conversion of the original DAC960 block driver to use the SCSI framework. Signed-off-by: Hannes Reinecke <hare@suse.com> --- drivers/scsi/Kconfig | 12 + drivers/scsi/Makefile | 1 + drivers/scsi/mylex.c | 6024 +++++++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/mylex.h | 4029 +++++++++++++++++++++++++++++++++ 4 files changed, 10066 insertions(+) create mode 100644 drivers/scsi/mylex.c create mode 100644 drivers/scsi/mylex.h