# HG changeset patch
# User Dan McDonald
# Date 1352321845 18000
# Node ID 267f693f357ee6cd8447a2121bc4a1e07910d994
# Parent  4eac7a87eff2120ae6e4ceadd2f396d208de1b78
3178 Support for LSI 2208 chipset in mr_sas
Reviewed by: Kevin Crowe
Reviewed by: Hans Rosenfeld
Reviewed by: Garrett D'Amore
Approved by: Richard Lowe

diff -r 4eac7a87eff2 -r 267f693f357e usr/src/pkg/manifests/driver-storage-mr_sas.mf
--- a/usr/src/pkg/manifests/driver-storage-mr_sas.mf	Wed Nov 07 14:05:17 2012 -0800
+++ b/usr/src/pkg/manifests/driver-storage-mr_sas.mf	Wed Nov 07 15:57:25 2012 -0500
@@ -21,6 +21,7 @@
 #
 # Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 #
 #
@@ -43,9 +44,13 @@
 $(sparc_ONLY)driver name=mr_sas class=scsi-self-identifying \
     alias=pci1000,78 \
     alias=pci1000,79 \
+    alias=pciex1000,5b \
+    alias=pciex1000,5d \
     alias=pciex1000,78 \
     alias=pciex1000,79
 $(i386_ONLY)driver name=mr_sas class=scsi-self-identifying \
+    alias=pciex1000,5b \
+    alias=pciex1000,5d \
     alias=pciex1000,78 \
     alias=pciex1000,79
 file path=kernel/drv/$(ARCH64)/mr_sas group=sys
diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/Makefile.files
--- a/usr/src/uts/common/Makefile.files	Wed Nov 07 14:05:17 2012 -0800
+++ b/usr/src/uts/common/Makefile.files	Wed Nov 07 15:57:25 2012 -0500
@@ -1995,7 +1995,7 @@
 #
 #	MR_SAS module
 #
-MR_SAS_OBJS = mr_sas.o
+MR_SAS_OBJS = ld_pd_map.o mr_sas.o mr_sas_tbolt.o mr_sas_list.o
 #
 #	ISCSI_INITIATOR module
diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/fusion.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/mr_sas/fusion.h	Wed Nov 07 15:57:25 2012 -0500
@@ -0,0 +1,561 @@
+/*
+ * fusion.h
+ *
+ * Solaris MegaRAID device driver for SAS2.0 controllers
+ * Copyright (c) 2008-2012, LSI Logic Corporation.
+ * All rights reserved.
+ *
+ * Version:
+ * Author:
+ *	Swaminathan K S
+ *	Arun Chandrashekhar
+ *	Manju R
+ *	Rasheed
+ *	Shakeel Bukhari
+ */
+
+
+#ifndef _FUSION_H_
+#define _FUSION_H_
+
+#define U64 uint64_t
+#define U32 uint32_t
+#define U16 uint16_t
+#define U8 uint8_t
+#define S8 char
+#define S16 short
+#define S32 int
+
+/* MPI2 defines */
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x6C)
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+    MPI2_VERSION_MINOR)
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT \
+    << 8) | \
+    MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+/* Invader defines */
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+#define MPI2_NSEG_FLAGS_SHIFT 4
+
+
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+
+/* get the mapping information of LD */
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+
+#ifndef MPI2_POINTER
+#define MPI2_POINTER *
+#endif
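+
+/*
+ * For reference only (derived from the definitions above, not part of
+ * the MPI2 headers): the composed version words evaluate to
+ *	MPI2_VERSION        = (0x02 << 8) | 0x00 = 0x0200
+ *	MPI2_HEADER_VERSION = (0x10 << 8) | 0x00 = 0x1000
+ */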
+
+#pragma pack(1)
+
+typedef struct _MPI25_IEEE_SGE_CHAIN64
+{
+    U64 Address;
+    U32 Length;
+    U16 Reserved1;
+    U8 NextChainOffset;
+    U8 Flags;
+} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64,
+    Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION
+{
+    U32 FlagsLength;
+    union
+    {
+        U32 Address32;
+        U64 Address64;
+    } u1;
+} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
+    Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
+
+typedef struct
+{
+    U8 CDB[20];				/* 0x00 */
+    U32 PrimaryReferenceTag;		/* 0x14 */
+    U16 PrimaryApplicationTag;		/* 0x18 */
+    U16 PrimaryApplicationTagMask;	/* 0x1A */
+    U32 TransferLength;			/* 0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
+    Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION
+{
+    U16 Length;
+    U8 NextChainOffset;
+    U8 Flags;
+    union
+    {
+        U32 Address32;
+        U64 Address64;
+    } u1;
+} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
+    Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE32
+{
+    U32 Address;
+    U32 FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
+    Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE64
+{
+    U64 Address;
+    U32 Length;
+    U16 Reserved1;
+    U8 Reserved2;
+    U8 Flags;
+} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
+    Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
+{
+    MPI2_IEEE_SGE_SIMPLE32 Simple32;
+    MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+    Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
+
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION
+{
+    MPI2_IEEE_SGE_CHAIN32 Chain32;
+    MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+    Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
+
+typedef union _MPI2_SGE_IO_UNION
+{
+    MPI2_SGE_SIMPLE_UNION MpiSimple;
+    MPI2_SGE_CHAIN_UNION MpiChain;
+    MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+    MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
+    Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
+
+typedef union
+{
+    U8 CDB32[32];
+    MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+    MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
+    Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
+
+/* Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
+{
+    U8 RequestFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U16 LMID;				/* 0x04 */
+    U16 DescriptorTypeDependent;	/* 0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+    Mpi2DefaultRequestDescriptor_t,
+    MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
+
+/* High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
+{
+    U8 RequestFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U16 LMID;				/* 0x04 */
+    U16 Reserved1;			/* 0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+    Mpi2HighPriorityRequestDescriptor_t,
+    MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
+
+/* SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+{
+    U8 RequestFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U16 LMID;				/* 0x04 */
+    U16 DevHandle;			/* 0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+    Mpi2SCSIIORequestDescriptor_t,
+    MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
+
+/* SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
+{
+    U8 RequestFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U16 LMID;				/* 0x04 */
+    U16 IoIndex;			/* 0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+    Mpi2SCSITargetRequestDescriptor_t,
+    MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
+
+/* RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR
+{
+    U8 RequestFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U16 LMID;				/* 0x04 */
+    U16 Reserved;			/* 0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+    Mpi2RAIDAcceleratorRequestDescriptor_t,
+    MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+/* Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
+{
+    U8 ReplyFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 DescriptorTypeDependent1;	/* 0x02 */
+    U32 DescriptorTypeDependent2;	/* 0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+    Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
+
+/* Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
+{
+    U8 ReplyFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U32 ReplyFrameAddress;		/* 0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+    Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
+
+/* SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+{
+    U8 ReplyFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U16 TaskTag;			/* 0x04 */
+    U16 Reserved1;			/* 0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+    Mpi2SCSIIOSuccessReplyDescriptor_t,
+    MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+/* TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
+{
+    U8 ReplyFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U8 SequenceNumber;			/* 0x04 */
+    U8 Reserved1;			/* 0x05 */
+    U16 IoIndex;			/* 0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+    Mpi2TargetAssistSuccessReplyDescriptor_t,
+    MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+/* Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
+{
+    U8 ReplyFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U8 VP_ID;				/* 0x02 */
+    U8 Flags;				/* 0x03 */
+    U16 InitiatorDevHandle;		/* 0x04 */
+    U16 IoIndex;			/* 0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+    Mpi2TargetCommandBufferReplyDescriptor_t,
+    MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/* RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+{
+    U8 ReplyFlags;			/* 0x00 */
+    U8 MSIxIndex;			/* 0x01 */
+    U16 SMID;				/* 0x02 */
+    U32 Reserved;			/* 0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+    MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+    Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+    MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+/* union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION
+{
+    MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+    MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+    MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+    MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+    MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+    MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+    U64 Words;
+} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+    Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+
+/* IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST
+{
+    U8 WhoInit;				/* 0x00 */
+    U8 Reserved1;			/* 0x01 */
+    U8 ChainOffset;			/* 0x02 */
+    U8 Function;			/* 0x03 */
+    U16 Reserved2;			/* 0x04 */
+    U8 Reserved3;			/* 0x06 */
+    U8 MsgFlags;			/* 0x07 */
+    U8 VP_ID;				/* 0x08 */
+    U8 VF_ID;				/* 0x09 */
+    U16 Reserved4;			/* 0x0A */
+    U16 MsgVersion;			/* 0x0C */
+    U16 HeaderVersion;			/* 0x0E */
+    U32 Reserved5;			/* 0x10 */
+    U16 Reserved6;			/* 0x14 */
+    U8 Reserved7;			/* 0x16 */
+    U8 HostMSIxVectors;			/* 0x17 */
+    U16 Reserved8;			/* 0x18 */
+    U16 SystemRequestFrameSize;		/* 0x1A */
+    U16 ReplyDescriptorPostQueueDepth;	/* 0x1C */
+    U16 ReplyFreeQueueDepth;		/* 0x1E */
+    U32 SenseBufferAddressHigh;		/* 0x20 */
+    U32 SystemReplyAddressHigh;		/* 0x24 */
+    U64 SystemRequestFrameBaseAddress;	/* 0x28 */
+    U64 ReplyDescriptorPostQueueAddress; /* 0x30 */
+    U64 ReplyFreeQueueAddress;		/* 0x38 */
+    U64 TimeStamp;			/* 0x40 */
+} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
+    Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
+
+
+typedef struct _MR_DEV_HANDLE_INFO {
+
+    /* Send bitmap of LDs that are idle with respect to FP */
+    U16 curDevHdl;
+
+    /* bitmap of valid device handles. */
+    U8 validHandles;
+    U8 reserved;
+    /* 0x04 dev handles for all the paths. */
+    U16 devHandle[2];
+} MR_DEV_HANDLE_INFO;		/* 0x08, Total Size */
+
+typedef struct _MR_ARRAY_INFO {
+    U16 pd[MAX_RAIDMAP_ROW_SIZE];
+} MR_ARRAY_INFO;		/* 0x40, Total Size */
+
+typedef struct _MR_QUAD_ELEMENT {
+    U64 logStart;		/* 0x00 */
+    U64 logEnd;			/* 0x08 */
+    U64 offsetInSpan;		/* 0x10 */
+    U32 diff;			/* 0x18 */
+    U32 reserved1;		/* 0x1C */
+} MR_QUAD_ELEMENT;		/* 0x20, Total size */
+
+typedef struct _MR_SPAN_INFO {
+    U32 noElements;		/* 0x00 */
+    U32 reserved1;		/* 0x04 */
+    MR_QUAD_ELEMENT quads[MAX_RAIDMAP_SPAN_DEPTH];	/* 0x08 */
+} MR_SPAN_INFO;			/* 0x108, Total size */
+
+typedef struct _MR_LD_SPAN_ {	/* SPAN structure */
+    /* 0x00, starting block number in array */
+    U64 startBlk;
+
+    /* 0x08, number of blocks */
+    U64 numBlks;
+
+    /* 0x10, array reference */
+    U16 arrayRef;
+
+    U8 reserved[6];		/* 0x12 */
+} MR_LD_SPAN;			/* 0x18, Total Size */
+
+typedef struct _MR_SPAN_BLOCK_INFO {
+    /* number of rows/span */
+    U64 num_rows;
+
+    MR_LD_SPAN span;			/* 0x08 */
+    MR_SPAN_INFO block_span_info;	/* 0x20 */
+} MR_SPAN_BLOCK_INFO;		/* 0x128, Total Size */
+
+typedef struct _MR_LD_RAID {
+    struct {
+        U32 fpCapable		:1;
+        U32 reserved5		:3;
+        U32 ldPiMode		:4;
+        U32 pdPiMode		:4;
+
+        /* FDE or controller encryption (MR_LD_ENCRYPTION_TYPE) */
+        U32 encryptionType	:8;
+
+        U32 fpWriteCapable	:1;
+        U32 fpReadCapable	:1;
+        U32 fpWriteAcrossStripe	:1;
+        U32 fpReadAcrossStripe	:1;
+        U32 reserved4		:8;
+    } capability;		/* 0x00 */
+    U32 reserved6;
+    U64 size;			/* 0x08, LD size in blocks */
+    U8 spanDepth;		/* 0x10, Total Number of Spans */
+    U8 level;			/* 0x11, RAID level */
+    /* 0x12, shift-count to get stripe size (0=512, 1=1K, 7=64K, etc.) */
+    U8 stripeShift;
+    U8 rowSize;			/* 0x13, number of disks in a row */
+    /* 0x14, number of data disks in a row */
+    U8 rowDataSize;
+    U8 writeMode;		/* 0x15, WRITE_THROUGH or WRITE_BACK */
+
+    /* 0x16, To differentiate between RAID1 and RAID1E */
+    U8 PRL;
+
+    U8 SRL;			/* 0x17 */
+    U16 targetId;		/* 0x18, ld Target Id. */
+
+    /* 0x1a, state of ld, state corresponds to MR_LD_STATE */
+    U8 ldState;
+
+    /* 0x1b, Pre calculate region type requests based on MFC etc.. */
+    U8 regTypeReqOnWrite;
+
+    U8 modFactor;		/* 0x1c, same as rowSize */
+    /*
+     * 0x1d, region lock type used for read, valid only if
+     * regTypeOnReadIsValid=1
+     */
+    U8 regTypeReqOnRead;
+    U16 seqNum;			/* 0x1e, LD sequence number */
+
+    struct {
+        /* This LD requires sync command before completing */
+        U32 ldSyncRequired:1;
+        U32 reserved:31;
+    } flags;			/* 0x20 */
+
+    U8 reserved3[0x5C];		/* 0x24 */
+} MR_LD_RAID;			/* 0x80, Total Size */
+
+typedef struct _MR_LD_SPAN_MAP {
+    MR_LD_RAID ldRaid;		/* 0x00 */
+
+    /* 0x80, needed for GET_ARM() - R0/1/5 only. */
+    U8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
+
+    MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];	/* 0xA0 */
+} MR_LD_SPAN_MAP;		/* 0x9E0 */
+
+typedef struct _MR_FW_RAID_MAP {
+    /* total size of this structure, including this field */
+    U32 totalSize;
+    union {
+        /* Simple method of version checking variables */
+        struct {
+            U32 maxLd;
+            U32 maxSpanDepth;
+            U32 maxRowSize;
+            U32 maxPdCount;
+            U32 maxArrays;
+        } validationInfo;
+        U32 version[5];
+        U32 reserved1[5];
+    } u1;
+
+    U32 ldCount;		/* count of lds */
+    U32 Reserved1;
+
+    /*
+     * 0x20 This doesn't correspond to
+     * FW Ld Tgt Id to LD, but will purge. For example: if tgt Id is 4
+     * and FW LD is 2, and there is only one LD, FW will populate the
+     * array like this. [0xFF, 0xFF, 0xFF, 0xFF, 0x0.....]. This is to
+     * help reduce the entire structure size if there are few LDs or
+     * driver is looking info for 1 LD only.
+     */
+    U8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+ \
+        MAX_RAIDMAP_VIEWS];	/* 0x20 */
+    /* timeout value used by driver in FP IOs */
+    U8 fpPdIoTimeoutSec;
+    U8 reserved2[7];
+    MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];	/* 0x00a8 */
+    MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+
+    /* 0x28a8-[0 -MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS+1]; */
+    MR_LD_SPAN_MAP ldSpanMap[1];
+} MR_FW_RAID_MAP;		/* 0x3288, Total Size */
+
+typedef struct _LD_TARGET_SYNC {
+    U8 ldTargetId;
+    U8 reserved;
+    U16 seqNum;
+} LD_TARGET_SYNC;
+
+#pragma pack()
+
+struct IO_REQUEST_INFO {
+    U64 ldStartBlock;
+    U32 numBlocks;
+    U16 ldTgtId;
+    U8 isRead;
+    U16 devHandle;
+    U64 pdBlock;
+    U8 fpOkForIo;
+    U8 ldPI;
+};
+
+#endif /* _FUSION_H_ */
diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/ld_pd_map.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/mr_sas/ld_pd_map.c	Wed Nov 07 15:57:25 2012 -0500
@@ -0,0 +1,539 @@
+/*
+ * **********************************************************************
+ *
+ * ld_pd_map.c
+ *
+ * Solaris MegaRAID device driver for SAS2.0 controllers
+ * Copyright (c) 2008-2012, LSI Logic Corporation.
+ * All rights reserved.
+ *
+ * Version:
+ * Author:
+ *	Swaminathan K S
+ *	Arun Chandrashekhar
+ *	Manju R
+ *	Rasheed
+ *	Shakeel Bukhari
+ *
+ *
+ * This module contains functions for device drivers
+ * to get pd-ld mapping information.
+ *
+ * **********************************************************************
+ */
+
+#include
+#include "mr_sas.h"
+#include "ld_pd_map.h"
+
+/*
+ * This function will check if FAST IO is possible on this logical drive
+ * by checking the EVENT information available in the driver
+ */
+#define MR_LD_STATE_OPTIMAL 3
+#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+
+static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
+    PLD_LOAD_BALANCE_INFO);
+
+#define FALSE 0
+#define TRUE 1
+
+typedef U64 REGION_KEY;
+typedef U32 REGION_LEN;
+extern int debug_level_g;
+
+
+MR_LD_RAID
+*MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
+{
+    return (&map->raidMap.ldSpanMap[ld].ldRaid);
+}
+
+U16
+MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map)
+{
+    return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+}
+
+
+static MR_SPAN_BLOCK_INFO *
+MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
+{
+    return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
+}
+
+static U8
+MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
+{
+    return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
+}
+
+static U16
+MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
+{
+    return (map->raidMap.arMapInfo[ar].pd[arm]);
+}
+
+static U16
+MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
+{
+    return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
+}
+
+static U16
+MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
+{
+    return (map->raidMap.devHndlInfo[pd].curDevHdl);
+}
+
+U16
+MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
+{
+    return (map->raidMap.ldTgtIdToLd[ldTgtId]);
+}
+
+U16
+MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
+{
+    MR_LD_RAID *raid;
+    U32 ld;
+
+    ld = MR_TargetIdToLdGet(ldTgtId, map);
+
+    if (ld >= MAX_LOGICAL_DRIVES) {
+        return (FALSE);
+    }
+
+    raid = MR_LdRaidGet(ld, map);
+
+    return (raid->capability.ldPiMode == 0x8);
+}
+
+static MR_LD_SPAN *
+MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
+{
+    return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
+}
+
+/*
+ * This function will validate Map info data provided by FW
+ */
+U8
+MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
+{
+    MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+    U32 fwsize = sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
+        (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount);
+
+    if (pFwRaidMap->totalSize != fwsize) {
+
+        con_log(CL_ANN1, (CE_NOTE,
+            "map info structure size 0x%x is "
+            "not matching with ld count\n", fwsize));
+        /* sizeof (foo) returns size_t, which is *LONG*. */
+        con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",\
+            (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));
+
+        return (0);
+    }
+
+    mr_update_load_balance_params(map, lbInfo);
+
+    return (1);
+}
+
+U32
+MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
+    int *div_error)
+{
+    MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+    MR_QUAD_ELEMENT *qe;
+    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+    U32 span, j;
+
+    for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
+        for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
+            qe = &pSpanBlock->block_span_info.quads[j];
+            if (qe->diff == 0) {
+                *div_error = 1;
+                return (span);
+            }
+            if (qe->logStart <= row && row <= qe->logEnd &&
+                (((row - qe->logStart) % qe->diff)) == 0) {
+                if (span_blk != NULL) {
+                    U64 blk;
+                    blk = ((row - qe->logStart) /
+                        (qe->diff));
+
+                    blk = (blk + qe->offsetInSpan) <<
+                        raid->stripeShift;
+                    *span_blk = blk;
+                }
+                return (span);
+            }
+        }
+    }
+    return (span);
+}
+
+
+/*
+ * *************************************************************
+ *
+ * This routine calculates the arm, span and block for
+ * the specified stripe and reference in stripe.
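+ *
+ * For illustration (example values, assuming a single-span LD with
+ * rowDataSize = 4 and stripeShift = 7, i.e. 128-block/64K strips):
+ * stripRow 13 lands in row 13 / 4 = 3; for a non-RAID6 LD the arm is
+ * dataArmMap[13 % modFactor], and the physical block becomes
+ * (row << 7) + stripRef + the span's startBlk.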
+ *
+ * Inputs :
+ *
+ * ld - Logical drive number
+ * stripRow - Stripe number
+ * stripRef - Reference in stripe
+ *
+ * Outputs :
+ *
+ * span - Span number
+ * block - Absolute Block number in the physical disk
+ */
+U8
+MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow,
+    U16 stripRef, U64 *pdBlock, U16 *pDevHandle,
+    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+{
+    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+    U32 pd, arRef;
+    U8 physArm, span;
+    U64 row;
+    int error_code = 0;
+    U8 retval = TRUE;
+    U32 rowMod;
+    U32 armQ;
+    U32 arm;
+
+    ASSERT(raid->rowDataSize != 0);
+
+    row = (stripRow / raid->rowDataSize);
+
+    if (raid->level == 6) {
+        U32 logArm = (stripRow % (raid->rowDataSize));
+
+        if (raid->rowSize == 0) {
+            return (FALSE);
+        }
+        rowMod = (row % (raid->rowSize));
+        armQ = raid->rowSize-1-rowMod;
+        arm = armQ + 1 + logArm;
+        if (arm >= raid->rowSize)
+            arm -= raid->rowSize;
+        physArm = (U8)arm;
+    } else {
+        if (raid->modFactor == 0)
+            return (FALSE);
+        physArm = MR_LdDataArmGet(ld,
+            (stripRow % (raid->modFactor)), map);
+    }
+    if (raid->spanDepth == 1) {
+        span = 0;
+        *pdBlock = row << raid->stripeShift;
+    } else
+        span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
+
+    if (error_code == 1)
+        return (FALSE);
+
+    /* Get the array on which this span is present. */
+    arRef = MR_LdSpanArrayGet(ld, span, map);
+    /* Get the Pd. */
+    pd = MR_ArPdGet(arRef, physArm, map);
+    /* Get dev handle from Pd. */
+    if (pd != MR_PD_INVALID) {
+        *pDevHandle = MR_PdDevHandleGet(pd, map);
+    } else {
+        *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
+        if ((raid->level >= 5) &&
+            ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
+            (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
+            raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) {
+            pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+        } else if (raid->level == 1) {
+            /* Get Alternate Pd. */
+            pd = MR_ArPdGet(arRef, physArm + 1, map);
+            /* Get dev handle from Pd. */
+            if (pd != MR_PD_INVALID)
+                *pDevHandle = MR_PdDevHandleGet(pd, map);
+        }
+    }
+
+    *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+
+    pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+        physArm;
+
+    return (retval);
+}
+
+
+
+/*
+ * ***********************************************************************
+ *
+ * MR_BuildRaidContext function
+ *
+ * This function will initiate command processing. The start/end row and
+ * strip information is calculated, then the region lock information is
+ * filled in. It returns TRUE once the RAID context has been built
+ * (including the FastPath physical parameters when fpOkForIo is set),
+ * and FALSE on failure.
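+ *
+ * For illustration (example values, not from the original source): with
+ * a 64K strip (stripeShift = 7, 128 blocks per strip), an 8-block I/O at
+ * LBA 250 gives start_strip = 250 >> 7 = 1, endLba = 257, and
+ * endStrip = 257 >> 7 = 2, so num_strips = 2 and the request crosses a
+ * strip boundary.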
+ */
+
+U8
+MR_BuildRaidContext(struct mrsas_instance *instance,
+    struct IO_REQUEST_INFO *io_info, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
+    MR_FW_RAID_MAP_ALL *map)
+{
+    MR_LD_RAID *raid;
+    U32 ld, stripSize, stripe_mask;
+    U64 endLba, endStrip, endRow;
+    U64 start_row, start_strip;
+    REGION_KEY regStart;
+    REGION_LEN regSize;
+    U8 num_strips, numRows;
+    U16 ref_in_start_stripe;
+    U16 ref_in_end_stripe;
+
+    U64 ldStartBlock;
+    U32 numBlocks, ldTgtId;
+    U8 isRead;
+    U8 retval = 0;
+
+    ldStartBlock = io_info->ldStartBlock;
+    numBlocks = io_info->numBlocks;
+    ldTgtId = io_info->ldTgtId;
+    isRead = io_info->isRead;
+
+    if (map == NULL) {
+        io_info->fpOkForIo = FALSE;
+        return (FALSE);
+    }
+
+    ld = MR_TargetIdToLdGet(ldTgtId, map);
+
+    if (ld >= MAX_LOGICAL_DRIVES) {
+        io_info->fpOkForIo = FALSE;
+        return (FALSE);
+    }
+
+    raid = MR_LdRaidGet(ld, map);
+
+    stripSize = 1 << raid->stripeShift;
+    stripe_mask = stripSize-1;
+    /*
+     * calculate starting row and stripe, and number of strips and rows
+     */
+    start_strip = ldStartBlock >> raid->stripeShift;
+    ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
+    endLba = ldStartBlock + numBlocks - 1;
+    ref_in_end_stripe = (U16)(endLba & stripe_mask);
+    endStrip = endLba >> raid->stripeShift;
+    num_strips = (U8)(endStrip - start_strip + 1);
+    /* Check to make sure we are not dividing by zero */
+    if (raid->rowDataSize == 0)
+        return (FALSE);
+    start_row = (start_strip / raid->rowDataSize);
+    endRow = (endStrip / raid->rowDataSize);
+    /* get the row count */
+    numRows = (U8)(endRow - start_row + 1);
+
+    /*
+     * calculate region info.
+     */
+    regStart = start_row << raid->stripeShift;
+    regSize = stripSize;
+
+    /* Check if we can send this I/O via FastPath */
+    if (raid->capability.fpCapable) {
+        if (isRead) {
+            io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+                ((num_strips == 1) ||
+                raid->capability.fpReadAcrossStripe));
+        } else {
+            io_info->fpOkForIo =
+                (raid->capability.fpWriteCapable &&
+                ((num_strips == 1) ||
+                raid->capability.fpWriteAcrossStripe));
+        }
+    } else
+        io_info->fpOkForIo = FALSE;
+
+
+    /*
+     * Check for DIF support
+     */
+    if (!raid->capability.ldPiMode) {
+        io_info->ldPI = FALSE;
+    } else {
+        io_info->ldPI = TRUE;
+    }
+
+    if (numRows == 1) {
+        if (num_strips == 1) {
+            regStart += ref_in_start_stripe;
+            regSize = numBlocks;
+        }
+    } else {
+        if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+            regStart += ref_in_start_stripe;
+            regSize = stripSize - ref_in_start_stripe;
+        }
+
+        if (numRows > 2) {
+            regSize += (numRows - 2) << raid->stripeShift;
+        }
+
+        if (endStrip == endRow * raid->rowDataSize) {
+            regSize += ref_in_end_stripe + 1;
+        } else {
+            regSize += stripSize;
+        }
+    }
+
+    pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
+
+    if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
+        pRAID_Context->regLockFlags = (isRead) ?
+            raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+    } else {
+        pRAID_Context->regLockFlags = (isRead) ?
+            REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+    }
+
+    pRAID_Context->ldTargetId = raid->targetId;
+    pRAID_Context->regLockRowLBA = regStart;
+    pRAID_Context->regLockLength = regSize;
+    pRAID_Context->configSeqNum = raid->seqNum;
+
+    /*
+     * Get Phy Params only if FP capable,
+     * or else leave it to MR firmware to do the calculation.
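+     * (For example, with fpReadCapable set but fpReadAcrossStripe
+     * clear, a read contained in a single strip (num_strips == 1) is
+     * still FastPath eligible, while the two-strip read illustrated
+     * above must go through firmware.)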
+     */
+    if (io_info->fpOkForIo) {
+        /* if fast path possible then get the physical parameters */
+        retval = MR_GetPhyParams(instance, ld, start_strip,
+            ref_in_start_stripe, &io_info->pdBlock,
+            &io_info->devHandle, pRAID_Context, map);
+
+        /* If IO on an invalid Pd, then FP is not possible. */
+        if (io_info->devHandle == MR_PD_INVALID)
+            io_info->fpOkForIo = FALSE;
+
+        return (retval);
+
+    } else if (isRead) {
+        uint_t stripIdx;
+
+        for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
+            if (!MR_GetPhyParams(instance, ld,
+                start_strip + stripIdx, ref_in_start_stripe,
+                &io_info->pdBlock, &io_info->devHandle,
+                pRAID_Context, map)) {
+                return (TRUE);
+            }
+        }
+    }
+    return (TRUE);
+}
+
+
+void
+mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
+    PLD_LOAD_BALANCE_INFO lbInfo)
+{
+    int ldCount;
+    U16 ld;
+    MR_LD_RAID *raid;
+
+    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+        ld = MR_TargetIdToLdGet(ldCount, map);
+
+        if (ld >= MAX_LOGICAL_DRIVES) {
+            con_log(CL_ANN1,
+                (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
+            continue;
+        }
+
+        raid = MR_LdRaidGet(ld, map);
+
+        /* Two drive Optimal RAID 1 */
+        if ((raid->level == 1) && (raid->rowSize == 2) &&
+            (raid->spanDepth == 1) &&
+            raid->ldState == MR_LD_STATE_OPTIMAL) {
+            U32 pd, arRef;
+
+            lbInfo[ldCount].loadBalanceFlag = 1;
+
+            /* Get the array on which this span is present. */
+            arRef = MR_LdSpanArrayGet(ld, 0, map);
+
+            pd = MR_ArPdGet(arRef, 0, map);	/* Get the Pd. */
+            /* Get dev handle from Pd. */
+            lbInfo[ldCount].raid1DevHandle[0] =
+                MR_PdDevHandleGet(pd, map);
+
+            pd = MR_ArPdGet(arRef, 1, map);	/* Get the Pd. */
+            /* Get dev handle from Pd. */
+            lbInfo[ldCount].raid1DevHandle[1] =
+                MR_PdDevHandleGet(pd, map);
+            con_log(CL_ANN1, (CE_NOTE,
+                "mrsas: ld=%d load balancing enabled \n", ldCount));
+        } else {
+            lbInfo[ldCount].loadBalanceFlag = 0;
+        }
+    }
+}
+
+
+U8
+megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block,
+    U32 count)
+{
+    U16 pend0, pend1;
+    U64 diff0, diff1;
+    U8 bestArm;
+
+    /* get the pending cmds for the data and mirror arms */
+    pend0 = lbInfo->scsi_pending_cmds[0];
+    pend1 = lbInfo->scsi_pending_cmds[1];
+
+    /* Determine the disk whose head is nearer to the req. block */
+    diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
+    diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
+    bestArm = (diff0 <= diff1 ? 0 : 1);
+
+    if ((bestArm == arm && pend0 > pend1 + 16) ||
+        (bestArm != arm && pend1 > pend0 + 16)) {
+        bestArm ^= 1;
+    }
+
+    /* Update the last accessed block on the correct pd */
+    lbInfo->last_accessed_block[bestArm] = block + count - 1;
+    return (bestArm);
+}
+
+U16
+get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+    struct IO_REQUEST_INFO *io_info)
+{
+    U8 arm, old_arm;
+    U16 devHandle;
+
+    old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
+
+    /* get best new arm */
+    arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
+        io_info->numBlocks);
+
+    devHandle = lbInfo->raid1DevHandle[arm];
+
+    lbInfo->scsi_pending_cmds[arm]++;
+
+    return (devHandle);
+}
diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/ld_pd_map.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/mr_sas/ld_pd_map.h	Wed Nov 07 15:57:25 2012 -0500
@@ -0,0 +1,249 @@
+/*
+ * ld_pd_map.h
+ *
+ * Solaris MegaRAID device driver for SAS2.0 controllers
+ * Copyright (c) 2008-2012, LSI Logic Corporation.
+ * All rights reserved.
+ *
+ * Version:
+ * Author:
+ *	Swaminathan K S
+ *	Arun Chandrashekhar
+ *	Manju R
+ *	Rasheed
+ *	Shakeel Bukhari
+ */
+
+#ifndef _LD_PD_MAP
+#define _LD_PD_MAP
+#include
+#include "fusion.h"
+
+struct mrsas_instance;	/* This will be defined in mr_sas.h */
+
+/* raid->write_mode; raid->read_ahead; dcmd->state */
+/* Write through */
+#define WRITE_THROUGH 0
+/* Delayed Write */
+#define WRITE_BACK 1
+
+/* SCSI CDB definitions */
+#define READ_6 0x08
+#define READ_16 0x88
+#define READ_10 0x28
+#define READ_12 0xA8
+#define WRITE_16 0x8A
+#define WRITE_10 0x2A
+
+/* maximum disks per array */
+#define MAX_ROW_SIZE 32
+/* maximum spans per logical drive */
+#define MAX_SPAN_DEPTH 8
+#define MEGASAS_LOAD_BALANCE_FLAG 0x1
+#define MR_DEFAULT_IO_TIMEOUT 20
+
+
+union desc_value {
+    U64 word;
+    struct {
+        U32 low;
+        U32 high;
+    } u1;
+};
+
+typedef struct _LD_LOAD_BALANCE_INFO
+{
+    U8 loadBalanceFlag;
+    U8 reserved1;
+    U16 raid1DevHandle[2];
+    U16 scsi_pending_cmds[2];
+    U64 last_accessed_block[2];
+} LD_LOAD_BALANCE_INFO, *PLD_LOAD_BALANCE_INFO;
+
+#pragma pack(1)
+typedef struct _MR_FW_RAID_MAP_ALL {
+    MR_FW_RAID_MAP raidMap;
+    MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} MR_FW_RAID_MAP_ALL;
+
+/*
+ * Raid Context structure which describes MegaRAID specific IO Parameters
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ */
+typedef struct _MPI2_SCSI_IO_VENDOR_UNIQUE {
+    U8 nsegType;	/* 0x00 nseg[7:4], Type[3:0] */
+    U8 resvd0;		/* 0x01 */
+    U16 timeoutValue;	/* 0x02 -0x03 */
+    U8 regLockFlags;	/* 0x04 */
+    U8 reservedForHw1;	/* 0x05 */
+    U16 ldTargetId;	/* 0x06 - 0x07 */
+    U64 regLockRowLBA;	/* 0x08 - 0x0F */
+    U32 regLockLength;	/* 0x10 - 0x13 */
+    U16 nextLMId;	/* 0x14 - 0x15 */
+    U8 extStatus;	/* 0x16 */
+    U8 status;		/* 0x17 status */
+    U8 RAIDFlags;	/* 0x18 resvd[7:6], ioSubType[5:4], */
+			/* resvd[3:1], preferredCpu[0] */
+    U8 numSGE;		/* 0x19 numSge; not including chain entries */
+    U16 configSeqNum;	/* 0x1A -0x1B */
+    U8 spanArm;		/* 0x1C span[7:5], arm[4:0] */
+    U8 resvd2[3];	/* 0x1D-0x1f */
+} MPI2_SCSI_IO_VENDOR_UNIQUE, MPI25_SCSI_IO_VENDOR_UNIQUE;
+
+#define RAID_CTX_SPANARM_ARM_SHIFT	(0)
+#define RAID_CTX_SPANARM_ARM_MASK	(0x1f)
+
+#define RAID_CTX_SPANARM_SPAN_SHIFT	(5)
+#define RAID_CTX_SPANARM_SPAN_MASK	(0xE0)
+
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less
+ * than _MPI2_SCSI_IO_REQUEST
+ */
+typedef struct _MPI2_RAID_SCSI_IO_REQUEST
+{
+    uint16_t DevHandle;				/* 0x00 */
+    uint8_t ChainOffset;			/* 0x02 */
+    uint8_t Function;				/* 0x03 */
+    uint16_t Reserved1;				/* 0x04 */
+    uint8_t Reserved2;				/* 0x06 */
+    uint8_t MsgFlags;				/* 0x07 */
+    uint8_t VP_ID;				/* 0x08 */
+    uint8_t VF_ID;				/* 0x09 */
+    uint16_t Reserved3;				/* 0x0A */
+    uint32_t SenseBufferLowAddress;		/* 0x0C */
+    uint16_t SGLFlags;				/* 0x10 */
+    uint8_t SenseBufferLength;			/* 0x12 */
+    uint8_t Reserved4;				/* 0x13 */
+    uint8_t SGLOffset0;				/* 0x14 */
+    uint8_t SGLOffset1;				/* 0x15 */
+    uint8_t SGLOffset2;				/* 0x16 */
+    uint8_t SGLOffset3;				/* 0x17 */
+    uint32_t SkipCount;				/* 0x18 */
+    uint32_t DataLength;			/* 0x1C */
+    uint32_t BidirectionalDataLength;		/* 0x20 */
+    uint16_t IoFlags;				/* 0x24 */
+    uint16_t EEDPFlags;				/* 0x26 */
+    uint32_t EEDPBlockSize;			/* 0x28 */
+    uint32_t SecondaryReferenceTag;		/* 0x2C */
+    uint16_t SecondaryApplicationTag;		/* 0x30 */
+    uint16_t ApplicationTagTranslationMask;	/* 0x32 */
+    uint8_t LUN[8];				/* 0x34 */
+    uint32_t Control;				/* 0x3C */
+    Mpi2ScsiIoCdb_t CDB;			/* 0x40 */
+    MPI2_SCSI_IO_VENDOR_UNIQUE RaidContext;	/* 0x60 */
+    Mpi2SGEIOUnion_t SGL;			/* 0x80 */
+} MPI2_RAID_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_RAID_SCSI_IO_REQUEST,
+Mpi2RaidSCSIIORequest_t, MPI2_POINTER pMpi2RaidSCSIIORequest_t;
+
+/*
+ * define region lock types
+ */
+typedef enum _REGION_TYPE {
+    REGION_TYPE_UNUSED = 0,	/* lock is currently not active */
+    REGION_TYPE_SHARED_READ = 1, /* shared lock (for reads) */
+    REGION_TYPE_SHARED_WRITE = 2,
+    REGION_TYPE_EXCLUSIVE = 3	/* exclusive lock (for writes) */
+} REGION_TYPE;
+
+
+#define DM_PATH_MAXPATH 2
+#define DM_PATH_FIRSTPATH 0
+#define DM_PATH_SECONDPATH 1
+
+/* declare valid Region locking values */
+typedef enum _REGION_LOCK {
+    REGION_LOCK_BYPASS = 0,
+    /* for RAID 6 single-drive failure */
+    REGION_LOCK_UNCOND_SHARED_READ = 1,
+    REGION_LOCK_UNCOND_SHARED_WRITE = 2,
+    REGION_LOCK_UNCOND_SHARED_OTHER = 3,
+    REGION_LOCK_UNCOND_SHARED_EXCLUSIVE = 0xFF
+} REGION_LOCK;
+
+
+struct mrsas_init_frame2 {
+    uint8_t cmd;				/* 00h */
+    uint8_t reserved_0;				/* 01h */
+    uint8_t cmd_status;				/* 02h */
+
+    uint8_t reserved_1;				/* 03h */
+    uint32_t reserved_2;			/* 04h */
+
+    uint32_t context;				/* 08h */
+    uint32_t pad_0;				/* 0Ch */
+
+    uint16_t flags;				/* 10h */
+    uint16_t reserved_3;			/* 12h */
+    uint32_t data_xfer_len;			/* 14h */
+
+    uint32_t queue_info_new_phys_addr_lo;	/* 18h */
+    uint32_t queue_info_new_phys_addr_hi;	/* 1Ch */
+    uint32_t queue_info_old_phys_addr_lo;	/* 20h */
+    uint32_t queue_info_old_phys_addr_hi;	/* 24h */
+    uint64_t driverversion;			/* 28h */
+    uint32_t reserved_4[4];			/* 30h */
+};
+
+
+/*
+ * Request descriptor types
+ */
+#define MPI2_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MPI2_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
+
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
+
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+typedef struct _MR_RAID_MFA_IO_DESCRIPTOR {
+    uint32_t RequestFlags : 8;
+    uint32_t MessageAddress1 : 24;	/* bits 31:8 */
+    uint32_t MessageAddress2;		/* bits 61:32 */
+} MR_RAID_MFA_IO_REQUEST_DESCRIPTOR,
+*PMR_RAID_MFA_IO_REQUEST_DESCRIPTOR;
+
+/* union of Request Descriptors */
+typedef union _MRSAS_REQUEST_DESCRIPTOR_UNION
+{
+    MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+    MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+    MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+    MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+    MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+    MR_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
+    U64 Words;
+} MRSAS_REQUEST_DESCRIPTOR_UNION;
+
+#pragma pack()
+
+enum {
+    MRSAS_SCSI_VARIABLE_LENGTH_CMD = 0x7F,
+    MRSAS_SCSI_SERVICE_ACTION_READ32 = 0x9,
+    MRSAS_SCSI_SERVICE_ACTION_WRITE32 = 0xB,
+    MRSAS_SCSI_ADDL_CDB_LEN = 0x18,
+    MRSAS_RD_WR_PROTECT = 0x20,
+    MRSAS_EEDPBLOCKSIZE = 512
+};
+
+
+#define IEEE_SGE_FLAGS_ADDR_MASK	(0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR	(0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR	(0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR	(0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR	(0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT	(0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST	(0x40)
+
+
+U8 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo);
+U16 MR_CheckDIF(U32, MR_FW_RAID_MAP_ALL *);
+U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
+    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
+
+#endif /* _LD_PD_MAP */
diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/mr_sas.c
--- a/usr/src/uts/common/io/mr_sas/mr_sas.c	Wed Nov 07 14:05:17 2012 -0800
+++ b/usr/src/uts/common/io/mr_sas/mr_sas.c	Wed Nov 07 15:57:25 2012 -0500
@@ -1,16 +1,17 @@
 /*
  * mr_sas.c: source for mr_sas driver
  *
- * MegaRAID device driver for SAS2.0 controllers
- * Copyright (c) 2008-2010, LSI Logic Corporation.
+ * Solaris MegaRAID device driver for SAS2.0 controllers
+ * Copyright (c) 2008-2012, LSI Logic Corporation.
  * All rights reserved.
  *
  * Version:
  * Author:
+ *	Swaminathan K S
  *	Arun Chandrashekhar
  *	Manju R
- *	Rajesh Prabhakaran
- *	Seokmann Ju
+ *	Rasheed
+ *	Shakeel Bukhari
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -43,6 +44,7 @@
 /*
  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
+ * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 */
 
 #include
@@ -83,15 +85,14 @@
  */
 static void *mrsas_state = NULL;
 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
-static volatile int debug_level_g = CL_NONE;
-static volatile int msi_enable = 1;
+volatile int debug_level_g = CL_NONE;
+static volatile int msi_enable = 1;
 static volatile int ctio_enable = 1;
 
 /* Default Timeout value to issue online controller reset */
-static volatile int debug_timeout_g = 0xB4;
+volatile int debug_timeout_g = 0xF0; /* 0xB4; */
 /* Simulate consecutive firmware fault */
 static volatile int debug_fw_faults_after_ocr_g = 0;
-
 #ifdef OCRDEBUG
 /* Simulate three consecutive timeout for an IO */
 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
@@ -101,11 +102,132 @@
 #pragma weak scsi_hba_close
 #pragma weak scsi_hba_ioctl
 
-static ddi_dma_attr_t mrsas_generic_dma_attr = {
+/* Local static prototypes. */
+static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
+#ifdef __sparc
+static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
+#else
+static int mrsas_quiesce(dev_info_t *);
+#endif
+static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
+static int mrsas_open(dev_t *, int, int, cred_t *);
+static int mrsas_close(dev_t, int, int, cred_t *);
+static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+
+static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
+    scsi_hba_tran_t *, struct scsi_device *);
+static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
+    struct scsi_pkt *, struct buf *, int, int, int, int,
+    int (*)(), caddr_t);
+static int mrsas_tran_start(struct scsi_address *,
+    register struct scsi_pkt *);
+static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
+static int mrsas_tran_reset(struct scsi_address *, int);
+static int mrsas_tran_getcap(struct scsi_address *, char *, int);
+static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
+static void mrsas_tran_destroy_pkt(struct scsi_address *,
+    struct scsi_pkt *);
+static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
+static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
+static int mrsas_tran_quiesce(dev_info_t *dip);
+static int mrsas_tran_unquiesce(dev_info_t *dip);
+static uint_t mrsas_isr();
+static uint_t mrsas_softintr();
+static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
+static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *);
+static void return_mfi_pkt(struct mrsas_instance *,
+    struct mrsas_cmd *);
+
+static void free_space_for_mfi(struct mrsas_instance *);
+static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
+static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
+static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
+    struct mrsas_cmd *);
+static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
+    struct mrsas_cmd *);
+static void enable_intr_ppc(struct mrsas_instance *);
+static void disable_intr_ppc(struct mrsas_instance *);
+static int intr_ack_ppc(struct mrsas_instance *);
+static void flush_cache(struct mrsas_instance *instance);
+void display_scsi_inquiry(caddr_t);
+static int start_mfi_aen(struct mrsas_instance *instance);
+static int handle_drv_ioctl(struct mrsas_instance *instance,
+    struct mrsas_ioctl *ioctl, int mode);
+static int handle_mfi_ioctl(struct mrsas_instance *instance,
+    struct mrsas_ioctl *ioctl, int mode);
+static int handle_mfi_aen(struct mrsas_instance *instance,
+    struct mrsas_aen *aen);
+static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
+    struct scsi_address *, struct scsi_pkt *, uchar_t *);
+static int alloc_additional_dma_buffer(struct mrsas_instance *);
+static void complete_cmd_in_sync_mode(struct mrsas_instance *,
+    struct mrsas_cmd *);
+static int mrsas_kill_adapter(struct mrsas_instance *);
+static int mrsas_issue_init_mfi(struct mrsas_instance *);
+static int mrsas_reset_ppc(struct mrsas_instance *);
+static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
+static int wait_for_outstanding(struct mrsas_instance *instance);
+static int register_mfi_aen(struct mrsas_instance *instance,
+    uint32_t seq_num, uint32_t class_locale_word);
+static int issue_mfi_pthru(struct mrsas_instance *instance, struct
+    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
+static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
+    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
+static int issue_mfi_smp(struct mrsas_instance *instance, struct
+    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
+static int issue_mfi_stp(struct mrsas_instance *instance, struct
+    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
+static int abort_aen_cmd(struct mrsas_instance *instance,
+    struct mrsas_cmd *cmd_to_abort);
+
+static void mrsas_rem_intrs(struct mrsas_instance *instance);
+static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
+
+static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
+    scsi_hba_tran_t *, struct scsi_device *);
+static int mrsas_tran_bus_config(dev_info_t *, uint_t,
+    ddi_bus_config_op_t, void *, dev_info_t **);
+static int mrsas_parse_devname(char *, int *, int *);
+static int mrsas_config_all_devices(struct mrsas_instance *);
+static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
+    uint8_t, dev_info_t **);
+static int mrsas_name_node(dev_info_t *, char *, int);
+static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
+static void free_additional_dma_buffer(struct mrsas_instance *);
+static void io_timeout_checker(void *);
+static void mrsas_fm_init(struct mrsas_instance *);
+static void mrsas_fm_fini(struct mrsas_instance *);
+
+static struct mrsas_function_template mrsas_function_template_ppc = {
+    .read_fw_status_reg = read_fw_status_reg_ppc,
+    .issue_cmd = issue_cmd_ppc,
+    .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
+    .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
+    .enable_intr = enable_intr_ppc,
+    .disable_intr = disable_intr_ppc,
+    .intr_ack = intr_ack_ppc,
+    .init_adapter = mrsas_init_adapter_ppc
+};
+
+
+static struct mrsas_function_template mrsas_function_template_fusion = {
+    .read_fw_status_reg = tbolt_read_fw_status_reg,
+    .issue_cmd = tbolt_issue_cmd,
+    .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
+    .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
+    .enable_intr = tbolt_enable_intr,
+    .disable_intr = tbolt_disable_intr,
+    .intr_ack = tbolt_intr_ack,
+    .init_adapter = mrsas_init_adapter_tbolt
+};
+
+
+ddi_dma_attr_t mrsas_generic_dma_attr = {
 	DMA_ATTR_V0,		/* dma_attr_version */
 	0,			/* low DMA address range */
 	0xFFFFFFFFU,		/* high DMA address range */
-	0xFFFFFFFFU,		/* DMA counter register */
+	0xFFFFFFFFU,		/* DMA counter register  */
 	8,			/* DMA address alignment */
 	0x07,			/* DMA burstsizes */
 	1,			/* min DMA size */
@@ -119,6 +241,12 @@
 int32_t mrsas_max_cap_maxxfer = 0x1000000;
 
 /*
+ * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
+ * Limit size to 256K
+ */
+uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
+
+/*
  * cb_ops contains base level routines
  */
 static struct cb_ops mrsas_cb_ops = {
@@ -153,18 +281,18 @@
 	nulldev,		/* probe */
 	mrsas_attach,		/* attach */
 	mrsas_detach,		/* detach */
-#ifdef	__sparc
+#ifdef __sparc
 	mrsas_reset,		/* reset */
 #else /* __sparc */
 	nodev,
-#endif	/* __sparc */
+#endif /* __sparc */
 	&mrsas_cb_ops,		/* char/block ops */
 	NULL,			/* bus ops */
 	NULL,			/* power */
-#ifdef	__sparc
+#ifdef __sparc
 	ddi_quiesce_not_needed
 #else /* __sparc */
-	mrsas_quiesce		/* quiesce */
+	mrsas_quiesce	/* quiesce */
 #endif /* __sparc */
 };
@@ -187,15 +315,29 @@
 	DDI_DEFAULT_ACC
 };
 
+/* Use the LSI Fast Path for the 2208 (tbolt) commands. */
+unsigned int enable_fp = 1;
+
 /*
  * **************************************************************************
  *
- *	*
- * common entry points - for loadable kernel modules	*
- *	*
+ *		*
+ * common entry points - for loadable kernel modules		*
+ *		*
  * **************************************************************************
  */
 
+/*
+ * _init - initialize a loadable module
+ * @void
+ *
+ * The driver should perform any one-time resource allocation or data
+ * initialization during driver loading in _init(). For example, the driver
+ * should initialize any mutexes global to the driver in this routine.
+ * The driver should not, however, use _init() to allocate or initialize
+ * anything that has to do with a particular instance of the device.
+ * Per-instance initialization must be done in attach().
+ */
 int
 _init(void)
 {
@@ -207,12 +349,12 @@
 	    sizeof (struct mrsas_instance), 0);
 
 	if (ret != DDI_SUCCESS) {
-		con_log(CL_ANN, (CE_WARN, "mr_sas: could not init state"));
+		cmn_err(CE_WARN, "mr_sas: could not init state");
 		return (ret);
 	}
 
 	if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
-		con_log(CL_ANN, (CE_WARN, "mr_sas: could not init scsi hba"));
+		cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
 		ddi_soft_state_fini(&mrsas_state);
 		return (ret);
 	}
@@ -220,7 +362,7 @@
 	ret = mod_install(&modlinkage);
 
 	if (ret != DDI_SUCCESS) {
-		con_log(CL_ANN, (CE_WARN, "mr_sas: mod_install failed"));
+		cmn_err(CE_WARN, "mr_sas: mod_install failed");
 		scsi_hba_fini(&modlinkage);
 		ddi_soft_state_fini(&mrsas_state);
 	}
@@ -228,6 +370,13 @@
 	return (ret);
 }
 
+/*
+ * _info - returns information about a loadable module.
+ * @void
+ *
+ * _info() is called to return module information. This is a typical entry
+ * point that performs a predefined role: it simply calls mod_info().
+ */
 int
 _info(struct modinfo *modinfop)
 {
@@ -236,6 +385,13 @@
 	return (mod_info(&modlinkage, modinfop));
 }
 
+/*
+ * _fini - prepare a loadable module for unloading
+ * @void
+ *
+ * In _fini(), the driver should release any resources that were allocated in
+ * _init(). The driver must remove itself from the system module list.
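+ * (mod_remove(9F) fails, and _fini() must return that error, for as long
+ * as any instance of the driver is still attached; see the check below.)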
+ */
 int
 _fini(void)
 {
@@ -243,12 +399,17 @@
 	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 
-	if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS)
+	if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
+		con_log(CL_ANN1,
+		    (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
 		return (ret);
+	}
 
 	scsi_hba_fini(&modlinkage);
+	con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
 
 	ddi_soft_state_fini(&mrsas_state);
+	con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
 
 	return (ret);
 }
@@ -256,24 +417,41 @@
 /*
  * **************************************************************************
  *
- *	*
- * common entry points - for autoconfiguration	*
- *	*
+ *		*
+ * common entry points - for autoconfiguration		*
+ *		*
  * **************************************************************************
  */
-
+/*
+ * attach - adds a device to the system as part of initialization
+ * @dip:
+ * @cmd:
+ *
+ * The kernel calls a driver's attach() entry point to attach an instance of
+ * a device (for MegaRAID, an instance of a controller) or to resume
+ * operation for an instance of a device that has been suspended or has been
+ * shut down by the power management framework.
+ * The attach() entry point typically includes the following types of
+ * processing:
+ * - allocate a soft-state structure for the device instance (for MegaRAID,
+ *   controller instance)
+ * - initialize per-instance mutexes
+ * - initialize condition variables
+ * - register the device's interrupts (for MegaRAID, controller's interrupts)
+ * - map the registers and memory of the device instance (for MegaRAID,
+ *   controller instance)
+ * - create minor device nodes for the device instance (for MegaRAID,
+ *   controller instance)
+ * - report that the device instance (for MegaRAID, controller instance) has
+ *   attached
+ */
 static int
 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 {
 	int	instance_no;
 	int	nregs;
-	uint8_t	added_isr_f = 0;
-	uint8_t	added_soft_isr_f = 0;
-	uint8_t	create_devctl_node_f = 0;
-	uint8_t	create_scsi_node_f = 0;
-	uint8_t	create_ioc_node_f = 0;
-	uint8_t	tran_alloc_f = 0;
-	uint8_t	irq;
+	int	i = 0;
+	uint8_t	irq;
 	uint16_t	vendor_id;
 	uint16_t	device_id;
 	uint16_t	subsysvid;
@@ -284,7 +462,7 @@
 	char	*data;
 
 	scsi_hba_tran_t	*tran;
-	ddi_dma_attr_t	tran_dma_attr;
+	ddi_dma_attr_t tran_dma_attr;
 	struct mrsas_instance	*instance;
 
 	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
@@ -298,481 +476,508 @@
 	 * check to see whether this device is in a DMA-capable slot.
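+	 * (ddi_slaveonly(9F) returns DDI_SUCCESS exactly when the slot
+	 * cannot support a bus master, so a DMA-capable HBA must bail
+	 * out of such a slot.)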
 	 */
 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
-		con_log(CL_ANN, (CE_WARN,
+		cmn_err(CE_WARN,
 		    "mr_sas%d: Device in slave-only slot, unused",
-		    instance_no));
+		    instance_no);
 		return (DDI_FAILURE);
 	}
 
 	switch (cmd) {
-	case DDI_ATTACH:
-		con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: DDI_ATTACH"));
-		/* allocate the soft state for the instance */
-		if (ddi_soft_state_zalloc(mrsas_state, instance_no)
-		    != DDI_SUCCESS) {
-			con_log(CL_ANN, (CE_WARN,
-			    "mr_sas%d: Failed to allocate soft state",
-			    instance_no));
-
-			return (DDI_FAILURE);
-		}
-
-		instance = (struct mrsas_instance *)ddi_get_soft_state
-		    (mrsas_state, instance_no);
-
-		if (instance == NULL) {
-			con_log(CL_ANN, (CE_WARN,
-			    "mr_sas%d: Bad soft state", instance_no));
-
-			ddi_soft_state_free(mrsas_state, instance_no);
-
-			return (DDI_FAILURE);
-		}
-
-		bzero((caddr_t)instance,
-		    sizeof (struct mrsas_instance));
-
-		instance->func_ptr = kmem_zalloc(
-		    sizeof (struct mrsas_func_ptr), KM_SLEEP);
-		ASSERT(instance->func_ptr);
-
-		/* Setup the PCI configuration space handles */
-		if (pci_config_setup(dip, &instance->pci_handle) !=
-		    DDI_SUCCESS) {
-			con_log(CL_ANN, (CE_WARN,
-			    "mr_sas%d: pci config setup failed ",
-			    instance_no));
-
-			kmem_free(instance->func_ptr,
-			    sizeof (struct mrsas_func_ptr));
-			ddi_soft_state_free(mrsas_state, instance_no);
-
-			return (DDI_FAILURE);
-		}
-
-		if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
-			con_log(CL_ANN, (CE_WARN,
-			    "mr_sas: failed to get registers."));
-
-			pci_config_teardown(&instance->pci_handle);
-			kmem_free(instance->func_ptr,
-			    sizeof (struct mrsas_func_ptr));
-			ddi_soft_state_free(mrsas_state, instance_no);
-
-			return (DDI_FAILURE);
-		}
-
-		vendor_id = pci_config_get16(instance->pci_handle,
-		    PCI_CONF_VENID);
-		device_id = pci_config_get16(instance->pci_handle,
-		    PCI_CONF_DEVID);
-
-		subsysvid = pci_config_get16(instance->pci_handle,
-		    PCI_CONF_SUBVENID);
-		subsysid = pci_config_get16(instance->pci_handle,
-		    PCI_CONF_SUBSYSID);
-
-		pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
-		    (pci_config_get16(instance->pci_handle,
-		    PCI_CONF_COMM) | PCI_COMM_ME));
-		irq = pci_config_get8(instance->pci_handle,
-		    PCI_CONF_ILINE);
-
+	case DDI_ATTACH:
+		/* allocate the soft state for the instance */
+		if (ddi_soft_state_zalloc(mrsas_state, instance_no)
+		    != DDI_SUCCESS) {
+			cmn_err(CE_WARN,
+			    "mr_sas%d: Failed to allocate soft state",
+			    instance_no);
+			return (DDI_FAILURE);
+		}
+
+		instance = (struct mrsas_instance *)ddi_get_soft_state
+		    (mrsas_state, instance_no);
+
+		if (instance == NULL) {
+			cmn_err(CE_WARN,
+			    "mr_sas%d: Bad soft state", instance_no);
+			ddi_soft_state_free(mrsas_state, instance_no);
+			return (DDI_FAILURE);
+		}
+
+		instance->unroll.softs = 1;
+
+		/* Setup the PCI configuration space handles */
+		if (pci_config_setup(dip, &instance->pci_handle) !=
+		    DDI_SUCCESS) {
+			cmn_err(CE_WARN,
+			    "mr_sas%d: pci config setup failed ",
+			    instance_no);
+
+			ddi_soft_state_free(mrsas_state, instance_no);
+			return (DDI_FAILURE);
+		}
+
+		if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
+			cmn_err(CE_WARN,
+			    "mr_sas: failed to get registers.");
+
+			pci_config_teardown(&instance->pci_handle);
+			ddi_soft_state_free(mrsas_state, instance_no);
+			return (DDI_FAILURE);
+		}
+
+		vendor_id = pci_config_get16(instance->pci_handle,
+		    PCI_CONF_VENID);
+		device_id = pci_config_get16(instance->pci_handle,
+		    PCI_CONF_DEVID);
+
+		subsysvid = pci_config_get16(instance->pci_handle,
+		    PCI_CONF_SUBVENID);
+		subsysid = pci_config_get16(instance->pci_handle,
+		    PCI_CONF_SUBSYSID);
+
+		pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
+		    (pci_config_get16(instance->pci_handle,
+		    PCI_CONF_COMM) | PCI_COMM_ME));
+		irq = pci_config_get8(instance->pci_handle,
+		    PCI_CONF_ILINE);
+
+		con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
+		    "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
+		    instance_no, vendor_id, device_id, subsysvid,
+		    subsysid, irq, MRSAS_VERSION));
+
+		/* enable bus-mastering */
+		command = pci_config_get16(instance->pci_handle,
+		    PCI_CONF_COMM);
+
+		if (!(command & PCI_COMM_ME)) {
+			command |= PCI_COMM_ME;
+
+			pci_config_put16(instance->pci_handle,
+			    PCI_CONF_COMM, command);
+
+			con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
+			    "enable bus-mastering", instance_no));
+		} else {
 			con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
-			    "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
-			    instance_no, vendor_id, device_id, subsysvid,
-			    subsysid, irq, MRSAS_VERSION));
-
-			/* enable bus-mastering */
-			command = pci_config_get16(instance->pci_handle,
-			    PCI_CONF_COMM);
-
-			if (!(command & PCI_COMM_ME)) {
-				command |= PCI_COMM_ME;
-
-				pci_config_put16(instance->pci_handle,
-				    PCI_CONF_COMM, command);
-
-				con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
-				    "enable bus-mastering", instance_no));
-			} else {
-				con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
-				    "bus-mastering already set", instance_no));
+			    "bus-mastering already set", instance_no));
+		}
+
+		/* initialize function pointers */
+		switch (device_id) {
+		case PCI_DEVICE_ID_LSI_TBOLT:
+		case PCI_DEVICE_ID_LSI_INVADER:
+			con_log(CL_ANN, (CE_NOTE,
+			    "mr_sas: 2208 T.B. device detected"));
+
+			instance->func_ptr =
+			    &mrsas_function_template_fusion;
+			instance->tbolt = 1;
+			break;
+
+		case PCI_DEVICE_ID_LSI_2108VDE:
+		case PCI_DEVICE_ID_LSI_2108V:
+			con_log(CL_ANN, (CE_NOTE,
+			    "mr_sas: 2108 Liberator device detected"));
+
+			instance->func_ptr =
+			    &mrsas_function_template_ppc;
+			break;
+
+		default:
+			cmn_err(CE_WARN,
+			    "mr_sas: Invalid device detected");
+
+			pci_config_teardown(&instance->pci_handle);
+			ddi_soft_state_free(mrsas_state, instance_no);
+			return (DDI_FAILURE);
+		}
+
+		instance->baseaddress = pci_config_get32(
+		    instance->pci_handle, PCI_CONF_BASE0);
+		instance->baseaddress &= 0x0fffc;
+
+		instance->dip = dip;
+		instance->vendor_id = vendor_id;
+		instance->device_id = device_id;
+		instance->subsysvid = subsysvid;
+		instance->subsysid = subsysid;
+		instance->instance = instance_no;
+
+		/* Initialize FMA */
+		instance->fm_capabilities = ddi_prop_get_int(
+		    DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
+		    "fm-capable", DDI_FM_EREPORT_CAPABLE |
+		    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
+		    | DDI_FM_ERRCB_CAPABLE);
+
+		mrsas_fm_init(instance);
+
+		/* Setup register map */
+		if ((ddi_dev_regsize(instance->dip,
+		    REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
+		    reglength < MINIMUM_MFI_MEM_SZ) {
+			goto fail_attach;
+		}
+		if (reglength > DEFAULT_MFI_MEM_SZ) {
+			reglength = DEFAULT_MFI_MEM_SZ;
+			con_log(CL_DLEVEL1, (CE_NOTE,
+			    "mr_sas: register length to map is 0x%lx bytes",
+			    reglength));
+		}
+		if (ddi_regs_map_setup(instance->dip,
+		    REGISTER_SET_IO_2108, &instance->regmap, 0,
+		    reglength, &endian_attr, &instance->regmap_handle)
+		    != DDI_SUCCESS) {
+			cmn_err(CE_WARN,
+			    "mr_sas: couldn't map control registers");
+			goto fail_attach;
+		}
+
+		instance->unroll.regs = 1;
+
+		/*
+		 * Disable Interrupt Now.
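+		 * (Interrupts are masked at this point so that no
+		 * controller completion can fire before the handlers
+		 * registered below, and the state they rely on, exist.)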
+ * Setup Software interrupt + */ + instance->func_ptr->disable_intr(instance); + + if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, + "mrsas-enable-msi", &data) == DDI_SUCCESS) { + if (strncmp(data, "no", 3) == 0) { + msi_enable = 0; + con_log(CL_ANN1, (CE_WARN, + "msi_enable = %d disabled", msi_enable)); } - - /* initialize function pointers */ - if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) || - (device_id == PCI_DEVICE_ID_LSI_2108V)) { - con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " - "2108V/DE detected", instance_no)); - instance->func_ptr->read_fw_status_reg = - read_fw_status_reg_ppc; - instance->func_ptr->issue_cmd = issue_cmd_ppc; - instance->func_ptr->issue_cmd_in_sync_mode = - issue_cmd_in_sync_mode_ppc; - instance->func_ptr->issue_cmd_in_poll_mode = - issue_cmd_in_poll_mode_ppc; - instance->func_ptr->enable_intr = - enable_intr_ppc; - instance->func_ptr->disable_intr = - disable_intr_ppc; - instance->func_ptr->intr_ack = intr_ack_ppc; - } else { - con_log(CL_ANN, (CE_WARN, - "mr_sas: Invalid device detected")); - - pci_config_teardown(&instance->pci_handle); - kmem_free(instance->func_ptr, - sizeof (struct mrsas_func_ptr)); - ddi_soft_state_free(mrsas_state, instance_no); - - return (DDI_FAILURE); + ddi_prop_free(data); + } + + con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable)); + + if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, + "mrsas-enable-fp", &data) == DDI_SUCCESS) { + if (strncmp(data, "no", 3) == 0) { + enable_fp = 0; + cmn_err(CE_NOTE, + "enable_fp = %d, Fast-Path disabled.\n", + enable_fp); } - instance->baseaddress = pci_config_get32( - instance->pci_handle, PCI_CONF_BASE0); - instance->baseaddress &= 0x0fffc; - - instance->dip = dip; - instance->vendor_id = vendor_id; - instance->device_id = device_id; - instance->subsysvid = subsysvid; - instance->subsysid = subsysid; - instance->instance = instance_no; - - /* Initialize FMA */ - instance->fm_capabilities = ddi_prop_get_int( - DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, - "fm-capable", DDI_FM_EREPORT_CAPABLE | - DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE - | DDI_FM_ERRCB_CAPABLE); - - mrsas_fm_init(instance); - - /* Initialize Interrupts */ - if ((ddi_dev_regsize(instance->dip, - REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) || - reglength < MINIMUM_MFI_MEM_SZ) { - return (DDI_FAILURE); - } - if (reglength > DEFAULT_MFI_MEM_SZ) { - reglength = DEFAULT_MFI_MEM_SZ; - con_log(CL_DLEVEL1, (CE_NOTE, - "mr_sas: register length to map is " - "0x%lx bytes", reglength)); - } - if (ddi_regs_map_setup(instance->dip, - REGISTER_SET_IO_2108, &instance->regmap, 0, - reglength, &endian_attr, &instance->regmap_handle) - != DDI_SUCCESS) { - con_log(CL_ANN, (CE_NOTE, - "mr_sas: couldn't map control registers")); + ddi_prop_free(data); + } + + con_log(CL_DLEVEL1, (CE_NOTE, "enable_fp = %d\n", enable_fp)); + + /* Check for all supported interrupt types */ + if (ddi_intr_get_supported_types( + dip, &intr_types) != DDI_SUCCESS) { + cmn_err(CE_WARN, + "ddi_intr_get_supported_types() failed"); + goto fail_attach; + } + + con_log(CL_DLEVEL1, (CE_NOTE, + "ddi_intr_get_supported_types() ret: 0x%x", intr_types)); + + /* Initialize and Setup Interrupt handler */ + if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { + if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) != + DDI_SUCCESS) { + cmn_err(CE_WARN, + "MSIX interrupt query failed"); goto fail_attach; } - - /* - * Disable Interrupt Now. 
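/*
 * Aside, an illustrative sketch: the driver.conf boolean idiom used for
 * mrsas-enable-msi and mrsas-enable-fp above.  The helper and property
 * names are hypothetical; the lookup/free pairing is the standard DDI one.
 */
static int
example_prop_says_no(dev_info_t *dip, char *prop)
{
        char *val;
        int no = 0;

        if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
            prop, &val) == DDI_SUCCESS) {
                if (strncmp(val, "no", 3) == 0)
                        no = 1;
                ddi_prop_free(val);     /* always release the string */
        }
        return (no);
}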
- * Setup Software interrupt - */ - instance->func_ptr->disable_intr(instance); - - if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, - "mrsas-enable-msi", &data) == DDI_SUCCESS) { - if (strncmp(data, "no", 3) == 0) { - msi_enable = 0; - con_log(CL_ANN1, (CE_WARN, - "msi_enable = %d disabled", - msi_enable)); - } - ddi_prop_free(data); + instance->intr_type = DDI_INTR_TYPE_MSIX; + } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) { + if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) != + DDI_SUCCESS) { + cmn_err(CE_WARN, + "MSI interrupt query failed"); + goto fail_attach; } - - con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d", - msi_enable)); - - /* Check for all supported interrupt types */ - if (ddi_intr_get_supported_types( - dip, &intr_types) != DDI_SUCCESS) { - con_log(CL_ANN, (CE_WARN, - "ddi_intr_get_supported_types() failed")); + instance->intr_type = DDI_INTR_TYPE_MSI; + } else if (intr_types & DDI_INTR_TYPE_FIXED) { + msi_enable = 0; + if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) != + DDI_SUCCESS) { + cmn_err(CE_WARN, + "FIXED interrupt query failed"); goto fail_attach; } - - con_log(CL_DLEVEL1, (CE_NOTE, - "ddi_intr_get_supported_types() ret: 0x%x", - intr_types)); - - /* Initialize and Setup Interrupt handler */ - if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { - if (mrsas_add_intrs(instance, - DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) { - con_log(CL_ANN, (CE_WARN, - "MSIX interrupt query failed")); - goto fail_attach; - } - instance->intr_type = DDI_INTR_TYPE_MSIX; - } else if (msi_enable && (intr_types & - DDI_INTR_TYPE_MSI)) { - if (mrsas_add_intrs(instance, - DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { - con_log(CL_ANN, (CE_WARN, - "MSI interrupt query failed")); - goto fail_attach; - } - instance->intr_type = DDI_INTR_TYPE_MSI; - } else if (intr_types & DDI_INTR_TYPE_FIXED) { - msi_enable = 0; - if (mrsas_add_intrs(instance, - DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { - con_log(CL_ANN, (CE_WARN, - "FIXED interrupt query failed")); - goto fail_attach; - } - instance->intr_type = DDI_INTR_TYPE_FIXED; - } else { - con_log(CL_ANN, (CE_WARN, "Device cannot " - "suppport either FIXED or MSI/X " - "interrupts")); - goto fail_attach; + instance->intr_type = DDI_INTR_TYPE_FIXED; + } else { + cmn_err(CE_WARN, "Device cannot " + "suppport either FIXED or MSI/X " + "interrupts"); + goto fail_attach; + } + + instance->unroll.intr = 1; + + if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, + "mrsas-enable-ctio", &data) == DDI_SUCCESS) { + if (strncmp(data, "no", 3) == 0) { + ctio_enable = 0; + con_log(CL_ANN1, (CE_WARN, + "ctio_enable = %d disabled", ctio_enable)); } - - added_isr_f = 1; - - if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, - "mrsas-enable-ctio", &data) == DDI_SUCCESS) { - if (strncmp(data, "no", 3) == 0) { - ctio_enable = 0; - con_log(CL_ANN1, (CE_WARN, - "ctio_enable = %d disabled", - ctio_enable)); - } - ddi_prop_free(data); - } - - con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", - ctio_enable)); - - /* setup the mfi based low level driver */ - if (init_mfi(instance) != DDI_SUCCESS) { - con_log(CL_ANN, (CE_WARN, "mr_sas: " - "could not initialize the low level driver")); - - goto fail_attach; - } - - /* Initialize all Mutex */ - INIT_LIST_HEAD(&instance->completed_pool_list); - mutex_init(&instance->completed_pool_mtx, - "completed_pool_mtx", MUTEX_DRIVER, - DDI_INTR_PRI(instance->intr_pri)); - - mutex_init(&instance->app_cmd_pool_mtx, - "app_cmd_pool_mtx", MUTEX_DRIVER, - DDI_INTR_PRI(instance->intr_pri)); - - mutex_init(&instance->cmd_pend_mtx, 
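/*
 * Aside, a condensed sketch of the interrupt selection above: prefer
 * MSI-X, then MSI, then legacy FIXED, and fail the attach if the chosen
 * type cannot be registered.  mrsas_add_intrs() is the driver's own
 * helper; example_pick_intr() is hypothetical.
 */
static int
example_pick_intr(struct mrsas_instance *inst, dev_info_t *dip, int msi_ok)
{
        int types, pick;

        if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS)
                return (DDI_FAILURE);

        if (msi_ok && (types & DDI_INTR_TYPE_MSIX))
                pick = DDI_INTR_TYPE_MSIX;
        else if (msi_ok && (types & DDI_INTR_TYPE_MSI))
                pick = DDI_INTR_TYPE_MSI;
        else if (types & DDI_INTR_TYPE_FIXED)
                pick = DDI_INTR_TYPE_FIXED;
        else
                return (DDI_FAILURE);   /* nothing usable */

        if (mrsas_add_intrs(inst, pick) != DDI_SUCCESS)
                return (DDI_FAILURE);
        inst->intr_type = pick;
        return (DDI_SUCCESS);
}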
"cmd_pend_mtx", + ddi_prop_free(data); + } + + con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable)); + + /* setup the mfi based low level driver */ + if (mrsas_init_adapter(instance) != DDI_SUCCESS) { + cmn_err(CE_WARN, "mr_sas: " + "could not initialize the low level driver"); + + goto fail_attach; + } + + /* Initialize all Mutex */ + INIT_LIST_HEAD(&instance->completed_pool_list); + mutex_init(&instance->completed_pool_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + mutex_init(&instance->sync_map_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + mutex_init(&instance->app_cmd_pool_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + mutex_init(&instance->config_dev_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + mutex_init(&instance->cmd_pend_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + mutex_init(&instance->ocr_flags_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + mutex_init(&instance->int_cmd_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); + + mutex_init(&instance->cmd_pool_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + mutex_init(&instance->reg_write_mtx, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); + + if (instance->tbolt) { + mutex_init(&instance->cmd_app_pool_mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); - mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx", - MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); - - mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", + mutex_init(&instance->chip_mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); - cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); - - mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx", - MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); - - instance->timeout_id = (timeout_id_t)-1; - - /* Register our soft-isr for highlevel interrupts. */ - instance->isr_level = instance->intr_pri; + + } + + instance->unroll.mutexs = 1; + + instance->timeout_id = (timeout_id_t)-1; + + /* Register our soft-isr for highlevel interrupts. 
*/ + instance->isr_level = instance->intr_pri; + if (!(instance->tbolt)) { if (instance->isr_level == HIGH_LEVEL_INTR) { - if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, + if (ddi_add_softintr(dip, + DDI_SOFTINT_HIGH, &instance->soft_intr_id, NULL, NULL, mrsas_softintr, (caddr_t)instance) != DDI_SUCCESS) { - con_log(CL_ANN, (CE_WARN, - " Software ISR did not register")); + cmn_err(CE_WARN, + "Software ISR did not register"); goto fail_attach; } - added_soft_isr_f = 1; + instance->unroll.soft_isr = 1; + } - - /* Allocate a transport structure */ - tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); - - if (tran == NULL) { - con_log(CL_ANN, (CE_WARN, - "scsi_hba_tran_alloc failed")); - goto fail_attach; - } - - tran_alloc_f = 1; - - instance->tran = tran; - - tran->tran_hba_private = instance; - tran->tran_tgt_init = mrsas_tran_tgt_init; - tran->tran_tgt_probe = scsi_hba_probe; - tran->tran_tgt_free = mrsas_tran_tgt_free; + } + + instance->softint_running = 0; + + /* Allocate a transport structure */ + tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); + + if (tran == NULL) { + cmn_err(CE_WARN, + "scsi_hba_tran_alloc failed"); + goto fail_attach; + } + + instance->tran = tran; + instance->unroll.tran = 1; + + tran->tran_hba_private = instance; + tran->tran_tgt_init = mrsas_tran_tgt_init; + tran->tran_tgt_probe = scsi_hba_probe; + tran->tran_tgt_free = mrsas_tran_tgt_free; + if (instance->tbolt) { + tran->tran_init_pkt = + mrsas_tbolt_tran_init_pkt; + tran->tran_start = + mrsas_tbolt_tran_start; + } else { tran->tran_init_pkt = mrsas_tran_init_pkt; tran->tran_start = mrsas_tran_start; - tran->tran_abort = mrsas_tran_abort; - tran->tran_reset = mrsas_tran_reset; - tran->tran_getcap = mrsas_tran_getcap; - tran->tran_setcap = mrsas_tran_setcap; - tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; - tran->tran_dmafree = mrsas_tran_dmafree; - tran->tran_sync_pkt = mrsas_tran_sync_pkt; - tran->tran_bus_config = mrsas_tran_bus_config; - - if (mrsas_relaxed_ordering) - mrsas_generic_dma_attr.dma_attr_flags |= - DDI_DMA_RELAXED_ORDERING; - - - tran_dma_attr = mrsas_generic_dma_attr; - tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; - - /* Attach this instance of the hba */ - if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) - != DDI_SUCCESS) { - con_log(CL_ANN, (CE_WARN, - "scsi_hba_attach failed")); - - goto fail_attach; - } - - /* create devctl node for cfgadm command */ - if (ddi_create_minor_node(dip, "devctl", - S_IFCHR, INST2DEVCTL(instance_no), - DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { - con_log(CL_ANN, (CE_WARN, - "mr_sas: failed to create devctl node.")); - - goto fail_attach; - } - - create_devctl_node_f = 1; - - /* create scsi node for cfgadm command */ - if (ddi_create_minor_node(dip, "scsi", S_IFCHR, - INST2SCSI(instance_no), - DDI_NT_SCSI_ATTACHMENT_POINT, 0) == - DDI_FAILURE) { - con_log(CL_ANN, (CE_WARN, - "mr_sas: failed to create scsi node.")); - - goto fail_attach; - } - - create_scsi_node_f = 1; - - (void) sprintf(instance->iocnode, "%d:lsirdctl", - instance_no); - - /* - * Create a node for applications - * for issuing ioctl to the driver. 
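/*
 * Aside, a sketch of the transport wiring above: only tran_init_pkt and
 * tran_start differ between the 2208 (tbolt) and MFI paths; the other
 * entry points are shared.  example_wire_tran() is hypothetical.
 */
static void
example_wire_tran(scsi_hba_tran_t *tran, struct mrsas_instance *inst)
{
        tran->tran_hba_private = inst;
        if (inst->tbolt) {
                tran->tran_init_pkt = mrsas_tbolt_tran_init_pkt;
                tran->tran_start = mrsas_tbolt_tran_start;
        } else {
                tran->tran_init_pkt = mrsas_tran_init_pkt;
                tran->tran_start = mrsas_tran_start;
        }
}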
- */ - if (ddi_create_minor_node(dip, instance->iocnode, - S_IFCHR, INST2LSIRDCTL(instance_no), - DDI_PSEUDO, 0) == DDI_FAILURE) { - con_log(CL_ANN, (CE_WARN, - "mr_sas: failed to create ioctl node.")); - - goto fail_attach; + } + tran->tran_abort = mrsas_tran_abort; + tran->tran_reset = mrsas_tran_reset; + tran->tran_getcap = mrsas_tran_getcap; + tran->tran_setcap = mrsas_tran_setcap; + tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; + tran->tran_dmafree = mrsas_tran_dmafree; + tran->tran_sync_pkt = mrsas_tran_sync_pkt; + tran->tran_quiesce = mrsas_tran_quiesce; + tran->tran_unquiesce = mrsas_tran_unquiesce; + tran->tran_bus_config = mrsas_tran_bus_config; + + if (mrsas_relaxed_ordering) + mrsas_generic_dma_attr.dma_attr_flags |= + DDI_DMA_RELAXED_ORDERING; + + + tran_dma_attr = mrsas_generic_dma_attr; + tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; + + /* Attach this instance of the hba */ + if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) + != DDI_SUCCESS) { + cmn_err(CE_WARN, + "scsi_hba_attach failed"); + + goto fail_attach; + } + instance->unroll.tranSetup = 1; + con_log(CL_ANN1, + (CE_CONT, "scsi_hba_attach_setup() done.")); + + /* create devctl node for cfgadm command */ + if (ddi_create_minor_node(dip, "devctl", + S_IFCHR, INST2DEVCTL(instance_no), + DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { + cmn_err(CE_WARN, + "mr_sas: failed to create devctl node."); + + goto fail_attach; + } + + instance->unroll.devctl = 1; + + /* create scsi node for cfgadm command */ + if (ddi_create_minor_node(dip, "scsi", S_IFCHR, + INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) == + DDI_FAILURE) { + cmn_err(CE_WARN, + "mr_sas: failed to create scsi node."); + + goto fail_attach; + } + + instance->unroll.scsictl = 1; + + (void) sprintf(instance->iocnode, "%d:lsirdctl", + instance_no); + + /* + * Create a node for applications + * for issuing ioctl to the driver. + */ + if (ddi_create_minor_node(dip, instance->iocnode, + S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) == + DDI_FAILURE) { + cmn_err(CE_WARN, + "mr_sas: failed to create ioctl node."); + + goto fail_attach; + } + + instance->unroll.ioctl = 1; + + /* Create a taskq to handle dr events */ + if ((instance->taskq = ddi_taskq_create(dip, + "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) { + cmn_err(CE_WARN, + "mr_sas: failed to create taskq "); + instance->taskq = NULL; + goto fail_attach; + } + instance->unroll.taskq = 1; + con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done.")); + + /* enable interrupt */ + instance->func_ptr->enable_intr(instance); + + /* initiate AEN */ + if (start_mfi_aen(instance)) { + cmn_err(CE_WARN, + "mr_sas: failed to initiate AEN."); + goto fail_attach; + } + instance->unroll.aenPend = 1; + con_log(CL_ANN1, + (CE_CONT, "AEN started for instance %d.", instance_no)); + + /* Finally! We are on the air. */ + ddi_report_dev(dip); + + /* FMA handle checking. 
*/ + if (mrsas_check_acc_handle(instance->regmap_handle) != + DDI_SUCCESS) { + goto fail_attach; + } + if (mrsas_check_acc_handle(instance->pci_handle) != + DDI_SUCCESS) { + goto fail_attach; + } + + instance->mr_ld_list = + kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), + KM_SLEEP); + instance->unroll.ldlist_buff = 1; + +#ifdef PDSUPPORT + if (instance->tbolt) { + instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX; + instance->mr_tbolt_pd_list = + kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) * + sizeof (struct mrsas_tbolt_pd), KM_SLEEP); + ASSERT(instance->mr_tbolt_pd_list); + for (i = 0; i < instance->mr_tbolt_pd_max; i++) { + instance->mr_tbolt_pd_list[i].lun_type = + MRSAS_TBOLT_PD_LUN; + instance->mr_tbolt_pd_list[i].dev_id = + (uint8_t)i; } - create_ioc_node_f = 1; - - /* Create a taskq to handle dr events */ - if ((instance->taskq = ddi_taskq_create(dip, - "mrsas_dr_taskq", 1, - TASKQ_DEFAULTPRI, 0)) == NULL) { - con_log(CL_ANN, (CE_WARN, - "mr_sas: failed to create taskq ")); - instance->taskq = NULL; - goto fail_attach; - } - - /* enable interrupt */ - instance->func_ptr->enable_intr(instance); - - /* initiate AEN */ - if (start_mfi_aen(instance)) { - con_log(CL_ANN, (CE_WARN, - "mr_sas: failed to initiate AEN.")); - goto fail_initiate_aen; - } - - con_log(CL_DLEVEL1, (CE_NOTE, - "AEN started for instance %d.", instance_no)); - - /* Finally! We are on the air. */ - ddi_report_dev(dip); - - if (mrsas_check_acc_handle(instance->regmap_handle) != - DDI_SUCCESS) { - goto fail_attach; - } - if (mrsas_check_acc_handle(instance->pci_handle) != - DDI_SUCCESS) { - goto fail_attach; - } - instance->mr_ld_list = - kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), - KM_SLEEP); - break; - case DDI_PM_RESUME: - con_log(CL_ANN, (CE_NOTE, - "mr_sas: DDI_PM_RESUME")); - break; - case DDI_RESUME: - con_log(CL_ANN, (CE_NOTE, - "mr_sas: DDI_RESUME")); - break; - default: - con_log(CL_ANN, (CE_WARN, - "mr_sas: invalid attach cmd=%x", cmd)); - return (DDI_FAILURE); - } - + instance->unroll.pdlist_buff = 1; + } +#endif + break; + case DDI_PM_RESUME: + con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME")); + break; + case DDI_RESUME: + con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME")); + break; + default: + con_log(CL_ANN, + (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd)); + return (DDI_FAILURE); + } + + + con_log(CL_DLEVEL1, + (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d", + instance_no)); return (DDI_SUCCESS); -fail_initiate_aen: fail_attach: - if (create_devctl_node_f) { - ddi_remove_minor_node(dip, "devctl"); - } - - if (create_scsi_node_f) { - ddi_remove_minor_node(dip, "scsi"); - } - - if (create_ioc_node_f) { - ddi_remove_minor_node(dip, instance->iocnode); - } - - if (tran_alloc_f) { - scsi_hba_tran_free(tran); - } - - - if (added_soft_isr_f) { - ddi_remove_softintr(instance->soft_intr_id); - } - - if (added_isr_f) { - mrsas_rem_intrs(instance); - } - - if (instance && instance->taskq) { - ddi_taskq_destroy(instance->taskq); - } + + mrsas_undo_resources(dip, instance); mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); @@ -780,15 +985,30 @@ mrsas_fm_fini(instance); pci_config_teardown(&instance->pci_handle); - ddi_soft_state_free(mrsas_state, instance_no); - con_log(CL_ANN, (CE_NOTE, - "mr_sas: return failure from mrsas_attach")); + con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach")); + + cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d", + instance_no); return (DDI_FAILURE); } +/* + * 
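/*
 * Aside, the unroll-flag pattern this patch introduces, reduced to its
 * shape: each resource sets a flag once acquired, and a single teardown
 * routine releases exactly what the flags record, in reverse order.
 * Sketch only; the flag names follow the patch.
 */
static void
example_unroll(struct mrsas_instance *inst, dev_info_t *dip)
{
        if (inst->unroll.ioctl == 1) {
                ddi_remove_minor_node(dip, inst->iocnode);
                inst->unroll.ioctl = 0;
        }
        if (inst->unroll.devctl == 1) {
                ddi_remove_minor_node(dip, "devctl");
                inst->unroll.devctl = 0;
        }
        /* ... continue in reverse order of acquisition ... */
}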
getinfo - gets device information + * @dip: + * @cmd: + * @arg: + * @resultp: + * + * The system calls getinfo() to obtain configuration information that only + * the driver knows. The mapping of minor numbers to device instance is + * entirely under the control of the driver. The system sometimes needs to ask + * the driver which device a particular dev_t represents. + * Given the device number return the devinfo pointer from the scsi_device + * structure. + */ /*ARGSUSED*/ static int mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) @@ -827,6 +1047,19 @@ return (rval); } +/* + * detach - detaches a device from the system + * @dip: pointer to the device's dev_info structure + * @cmd: type of detach + * + * A driver's detach() entry point is called to detach an instance of a device + * that is bound to the driver. The entry point is called with the instance of + * the device node to be detached and with DDI_DETACH, which is specified as + * the cmd argument to the entry point. + * This routine is called during driver unload. We free all the allocated + * resources and call the corresponding LLD so that it can also release all + * its resources. + */ static int mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) { @@ -834,7 +1067,8 @@ struct mrsas_instance *instance; - con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + /* CONSTCOND */ ASSERT(NO_COMPETING_THREADS); @@ -845,9 +1079,9 @@ instance_no); if (!instance) { - con_log(CL_ANN, (CE_WARN, + cmn_err(CE_WARN, "mr_sas:%d could not get instance in detach", - instance_no)); + instance_no); return (DDI_FAILURE); } @@ -858,84 +1092,253 @@ instance->subsysvid, instance->subsysid)); switch (cmd) { - case DDI_DETACH: - con_log(CL_ANN, (CE_NOTE, - "mrsas_detach: DDI_DETACH")); - - if (scsi_hba_detach(dip) != DDI_SUCCESS) { + case DDI_DETACH: + con_log(CL_ANN, (CE_NOTE, + "mrsas_detach: DDI_DETACH")); + + mutex_enter(&instance->config_dev_mtx); + if (instance->timeout_id != (timeout_id_t)-1) { + mutex_exit(&instance->config_dev_mtx); + (void) untimeout(instance->timeout_id); + instance->timeout_id = (timeout_id_t)-1; + mutex_enter(&instance->config_dev_mtx); + instance->unroll.timer = 0; + } + mutex_exit(&instance->config_dev_mtx); + + if (instance->unroll.tranSetup == 1) { + if (scsi_hba_detach(dip) != DDI_SUCCESS) { + cmn_err(CE_WARN, + "mr_sas2%d: failed to detach", + instance_no); + return (DDI_FAILURE); + } + instance->unroll.tranSetup = 0; + con_log(CL_ANN1, + (CE_CONT, "scsi_hba_dettach() done.")); + } + + flush_cache(instance); + + mrsas_undo_resources(dip, instance); + + mrsas_fm_fini(instance); + + pci_config_teardown(&instance->pci_handle); + ddi_soft_state_free(mrsas_state, instance_no); + break; + + case DDI_PM_SUSPEND: + con_log(CL_ANN, (CE_NOTE, + "mrsas_detach: DDI_PM_SUSPEND")); + + break; + case DDI_SUSPEND: + con_log(CL_ANN, (CE_NOTE, + "mrsas_detach: DDI_SUSPEND")); + + break; + default: con_log(CL_ANN, (CE_WARN, - "mr_sas:%d failed to detach", - instance_no)); - - return (DDI_FAILURE); - } - - scsi_hba_tran_free(instance->tran); - - flush_cache(instance); - - if (abort_aen_cmd(instance, instance->aen_cmd)) { - con_log(CL_ANN, (CE_WARN, "mrsas_detach: " - "failed to abort prevous AEN command")); - + "invalid detach command:0x%x", cmd)); return (DDI_FAILURE); - } - - instance->func_ptr->disable_intr(instance); - - if (instance->isr_level == HIGH_LEVEL_INTR) { - ddi_remove_softintr(instance->soft_intr_id); - } - - 
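/*
 * Aside on the timer cancellation in DDI_DETACH above: untimeout(9F)
 * can wait for a running callback, so dropping config_dev_mtx around
 * the call, as this patch does, avoids deadlocking against a callback
 * that takes the same lock.  Sketch; field names follow the patch.
 */
static void
example_cancel_timer(struct mrsas_instance *inst)
{
        mutex_enter(&inst->config_dev_mtx);
        if (inst->timeout_id != (timeout_id_t)-1) {
                mutex_exit(&inst->config_dev_mtx);      /* see above */
                (void) untimeout(inst->timeout_id);
                inst->timeout_id = (timeout_id_t)-1;
                mutex_enter(&inst->config_dev_mtx);
        }
        mutex_exit(&inst->config_dev_mtx);
}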
mrsas_rem_intrs(instance); - - if (instance->taskq) { - ddi_taskq_destroy(instance->taskq); - } - kmem_free(instance->mr_ld_list, MRDRV_MAX_LD - * sizeof (struct mrsas_ld)); - free_space_for_mfi(instance); - - mrsas_fm_fini(instance); - - pci_config_teardown(&instance->pci_handle); - - kmem_free(instance->func_ptr, - sizeof (struct mrsas_func_ptr)); - - if (instance->timeout_id != (timeout_id_t)-1) { - (void) untimeout(instance->timeout_id); - instance->timeout_id = (timeout_id_t)-1; - } - ddi_soft_state_free(mrsas_state, instance_no); - break; - case DDI_PM_SUSPEND: - con_log(CL_ANN, (CE_NOTE, - "mrsas_detach: DDI_PM_SUSPEND")); - - break; - case DDI_SUSPEND: - con_log(CL_ANN, (CE_NOTE, - "mrsas_detach: DDI_SUSPEND")); - - break; - default: - con_log(CL_ANN, (CE_WARN, - "invalid detach command:0x%x", cmd)); - return (DDI_FAILURE); } return (DDI_SUCCESS); } + +static void +mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance) +{ + int instance_no; + + con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + + + instance_no = ddi_get_instance(dip); + + + if (instance->unroll.ioctl == 1) { + ddi_remove_minor_node(dip, instance->iocnode); + instance->unroll.ioctl = 0; + } + + if (instance->unroll.scsictl == 1) { + ddi_remove_minor_node(dip, "scsi"); + instance->unroll.scsictl = 0; + } + + if (instance->unroll.devctl == 1) { + ddi_remove_minor_node(dip, "devctl"); + instance->unroll.devctl = 0; + } + + if (instance->unroll.tranSetup == 1) { + if (scsi_hba_detach(dip) != DDI_SUCCESS) { + cmn_err(CE_WARN, + "mr_sas2%d: failed to detach", instance_no); + return; /* DDI_FAILURE */ + } + instance->unroll.tranSetup = 0; + con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done.")); + } + + if (instance->unroll.tran == 1) { + scsi_hba_tran_free(instance->tran); + instance->unroll.tran = 0; + con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done.")); + } + + if (instance->unroll.syncCmd == 1) { + if (instance->tbolt) { + if (abort_syncmap_cmd(instance, + instance->map_update_cmd)) { + cmn_err(CE_WARN, "mrsas_detach: " + "failed to abort previous syncmap command"); + } + + instance->unroll.syncCmd = 0; + con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done.")); + } + } + + if (instance->unroll.aenPend == 1) { + if (abort_aen_cmd(instance, instance->aen_cmd)) + cmn_err(CE_WARN, "mrsas_detach: " + "failed to abort prevous AEN command"); + + instance->unroll.aenPend = 0; + con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done.")); + /* This means the controller is fully initialized and running */ + /* Shutdown should be a last command to controller. 
*/ + /* shutdown_controller(); */ + } + + + if (instance->unroll.timer == 1) { + if (instance->timeout_id != (timeout_id_t)-1) { + (void) untimeout(instance->timeout_id); + instance->timeout_id = (timeout_id_t)-1; + + instance->unroll.timer = 0; + } + } + + instance->func_ptr->disable_intr(instance); + + + if (instance->unroll.mutexs == 1) { + mutex_destroy(&instance->cmd_pool_mtx); + mutex_destroy(&instance->app_cmd_pool_mtx); + mutex_destroy(&instance->cmd_pend_mtx); + mutex_destroy(&instance->completed_pool_mtx); + mutex_destroy(&instance->sync_map_mtx); + mutex_destroy(&instance->int_cmd_mtx); + cv_destroy(&instance->int_cmd_cv); + mutex_destroy(&instance->config_dev_mtx); + mutex_destroy(&instance->ocr_flags_mtx); + mutex_destroy(&instance->reg_write_mtx); + + if (instance->tbolt) { + mutex_destroy(&instance->cmd_app_pool_mtx); + mutex_destroy(&instance->chip_mtx); + } + + instance->unroll.mutexs = 0; + con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done.")); + } + + + if (instance->unroll.soft_isr == 1) { + ddi_remove_softintr(instance->soft_intr_id); + instance->unroll.soft_isr = 0; + } + + if (instance->unroll.intr == 1) { + mrsas_rem_intrs(instance); + instance->unroll.intr = 0; + } + + + if (instance->unroll.taskq == 1) { + if (instance->taskq) { + ddi_taskq_destroy(instance->taskq); + instance->unroll.taskq = 0; + } + + } + + /* + * free dma memory allocated for + * cmds/frames/queues/driver version etc + */ + if (instance->unroll.verBuff == 1) { + (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj); + instance->unroll.verBuff = 0; + } + + if (instance->unroll.pdlist_buff == 1) { + if (instance->mr_tbolt_pd_list != NULL) { + kmem_free(instance->mr_tbolt_pd_list, + MRSAS_TBOLT_GET_PD_MAX(instance) * + sizeof (struct mrsas_tbolt_pd)); + } + + instance->mr_tbolt_pd_list = NULL; + instance->unroll.pdlist_buff = 0; + } + + if (instance->unroll.ldlist_buff == 1) { + if (instance->mr_ld_list != NULL) { + kmem_free(instance->mr_ld_list, MRDRV_MAX_LD + * sizeof (struct mrsas_ld)); + } + + instance->mr_ld_list = NULL; + instance->unroll.ldlist_buff = 0; + } + + if (instance->tbolt) { + if (instance->unroll.alloc_space_mpi2 == 1) { + free_space_for_mpi2(instance); + instance->unroll.alloc_space_mpi2 = 0; + } + } else { + if (instance->unroll.alloc_space_mfi == 1) { + free_space_for_mfi(instance); + instance->unroll.alloc_space_mfi = 0; + } + } + + if (instance->unroll.regs == 1) { + ddi_regs_map_free(&instance->regmap_handle); + instance->unroll.regs = 0; + con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done.")); + } +} + + + /* * ************************************************************************** * - * * - * common entry points - for character driver types * - * * + * * + * common entry points - for character driver types * + * * * ************************************************************************** * */ -static int +/* + * open - gets access to a device + * @dev: + * @openflags: + * @otyp: + * @credp: + * + * Access to a device by one or more application programs is controlled + * through the open() and close() entry points. The primary function of + * open() is to verify that the open request is allowed. 
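/*
 * Aside, a skeletal open(9E) of the kind described above.  Hedged
 * sketch: the checks shown are typical, not a copy of the driver's
 * routine, and example_open() is a hypothetical name.
 */
static int
example_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
        if (otyp != OTYP_CHR)
                return (EINVAL);        /* character opens only */
        if (drv_priv(credp) != 0)
                return (EPERM);         /* require privilege */
        return (0);                     /* open request allowed */
}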
+ */ +static int mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) { int rval = 0; @@ -968,7 +1371,17 @@ return (rval); } -static int +/* + * close - gives up access to a device + * @dev: + * @openflags: + * @otyp: + * @credp: + * + * close() should perform any cleanup necessary to finish using the minor + * device, and prepare the device (and driver) to be opened again. + */ +static int mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp) { int rval = 0; @@ -984,6 +1397,23 @@ return (rval); } +/* + * ioctl - performs a range of I/O commands for character drivers + * @dev: + * @cmd: + * @arg: + * @mode: + * @credp: + * @rvalp: + * + * ioctl() routine must make sure that user data is copied into or out of the + * kernel address space explicitly using copyin(), copyout(), ddi_copyin(), + * and ddi_copyout(), as appropriate. + * This is a wrapper routine to serialize access to the actual ioctl routine. + * ioctl() should return 0 on success, or the appropriate error number. The + * driver may also set the value returned to the calling process through rvalp. + */ + static int mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp) @@ -1064,12 +1494,19 @@ /* * ************************************************************************** * - * * - * common entry points - for block driver types * - * * + * * + * common entry points - for block driver types * + * * * ************************************************************************** * */ -#ifdef __sparc +#ifdef __sparc +/* + * reset - TBD + * @dip: + * @cmd: + * + * TBD + */ /*ARGSUSED*/ static int mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) @@ -1092,7 +1529,7 @@ instance->func_ptr->disable_intr(instance); - con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d", + con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d", instance_no)); flush_cache(instance); @@ -1130,14 +1567,26 @@ "failed to abort prevous AEN command QUIESCE")); } + if (instance->tbolt) { + if (abort_syncmap_cmd(instance, + instance->map_update_cmd)) { + cmn_err(CE_WARN, + "mrsas_detach: failed to abort " + "previous syncmap command"); + return (DDI_FAILURE); + } + } + instance->func_ptr->disable_intr(instance); - con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d", + con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d", instance_no)); flush_cache(instance); if (wait_for_outstanding(instance)) { + con_log(CL_ANN1, + (CE_CONT, "wait_for_outstanding: return FAIL.\n")); return (DDI_FAILURE); } return (DDI_SUCCESS); @@ -1146,11 +1595,24 @@ /* * ************************************************************************** * - * * - * entry points (SCSI HBA) * - * * + * * + * entry points (SCSI HBA) * + * * * ************************************************************************** * */ +/* + * tran_tgt_init - initialize a target device instance + * @hba_dip: + * @tgt_dip: + * @tran: + * @sd: + * + * The tran_tgt_init() entry point enables the HBA to allocate and initialize + * any per-target resources. tran_tgt_init() also enables the HBA to qualify + * the device's address as valid and supportable for that particular HBA. + * By returning DDI_FAILURE, the instance of the target driver for that device + * is not probed or attached. 
+ */ /*ARGSUSED*/ static int mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, @@ -1159,32 +1621,61 @@ struct mrsas_instance *instance; uint16_t tgt = sd->sd_address.a_target; uint8_t lun = sd->sd_address.a_lun; - - con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init target %d lun %d", + dev_info_t *child = NULL; + + con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d", tgt, lun)); instance = ADDR2MR(&sd->sd_address); if (ndi_dev_is_persistent_node(tgt_dip) == 0) { - (void) ndi_merge_node(tgt_dip, mrsas_name_node); - ddi_set_name_addr(tgt_dip, NULL); - - con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init in " - "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d", - tgt, lun)); + /* + * If no persistent node exists, we don't allow .conf node + * to be created. + */ + if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { + con_log(CL_DLEVEL2, + (CE_NOTE, "mrsas_tgt_init find child =" + " %p t = %d l = %d", (void *)child, tgt, lun)); + if (ndi_merge_node(tgt_dip, mrsas_name_node) != + DDI_SUCCESS) + /* Create this .conf node */ + return (DDI_SUCCESS); + } + con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per " + "DDI_FAILURE t = %d l = %d", tgt, lun)); return (DDI_FAILURE); - } - - con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p", + + } + + con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p", (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip)); if (tgt < MRDRV_MAX_LD && lun == 0) { if (instance->mr_ld_list[tgt].dip == NULL && strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) { + mutex_enter(&instance->config_dev_mtx); instance->mr_ld_list[tgt].dip = tgt_dip; instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN; - } - } + instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID; + mutex_exit(&instance->config_dev_mtx); + } + } + +#ifdef PDSUPPORT + else if (instance->tbolt) { + if (instance->mr_tbolt_pd_list[tgt].dip == NULL) { + mutex_enter(&instance->config_dev_mtx); + instance->mr_tbolt_pd_list[tgt].dip = tgt_dip; + instance->mr_tbolt_pd_list[tgt].flag = + MRDRV_TGT_VALID; + mutex_exit(&instance->config_dev_mtx); + con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:" + "t%xl%x", tgt, lun)); + } + } +#endif + return (DDI_SUCCESS); } @@ -1199,16 +1690,29 @@ instance = ADDR2MR(&sd->sd_address); - con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun)); + con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun)); if (tgt < MRDRV_MAX_LD && lun == 0) { if (instance->mr_ld_list[tgt].dip == tgt_dip) { + mutex_enter(&instance->config_dev_mtx); instance->mr_ld_list[tgt].dip = NULL; - } - } + mutex_exit(&instance->config_dev_mtx); + } + } + +#ifdef PDSUPPORT + else if (instance->tbolt) { + mutex_enter(&instance->config_dev_mtx); + instance->mr_tbolt_pd_list[tgt].dip = NULL; + mutex_exit(&instance->config_dev_mtx); + con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL" + "for tgt:%x", tgt)); + } +#endif + } -static dev_info_t * +dev_info_t * mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun) { dev_info_t *child = NULL; @@ -1219,6 +1723,10 @@ for (child = ddi_get_child(instance->dip); child; child = ddi_get_next_sibling(child)) { + if (ndi_dev_is_persistent_node(child) == 0) { + continue; + } + if (mrsas_name_node(child, tmp, MAXNAMELEN) != DDI_SUCCESS) { continue; @@ -1228,11 +1736,17 @@ break; } } - con_log(CL_ANN1, (CE_NOTE, "mrsas_find_child: return child = %p", + con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p", (void *)child)); return (child); } +/* + * mrsas_name_node - + * @dip: + * 
@name: + * @len: + */ static int mrsas_name_node(dev_info_t *dip, char *name, int len) { @@ -1240,14 +1754,14 @@ tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target", -1); - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt)); if (tgt == -1) { return (DDI_FAILURE); } lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun", -1); - con_log(CL_ANN1, + con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun)); if (lun == -1) { return (DDI_FAILURE); @@ -1256,6 +1770,26 @@ return (DDI_SUCCESS); } +/* + * tran_init_pkt - allocate & initialize a scsi_pkt structure + * @ap: + * @pkt: + * @bp: + * @cmdlen: + * @statuslen: + * @tgtlen: + * @flags: + * @callback: + * + * The tran_init_pkt() entry point allocates and initializes a scsi_pkt + * structure and DMA resources for a target driver request. The + * tran_init_pkt() entry point is called when the target driver calls the + * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point + * is a request to perform one or more of three possible services: + * - allocation and initialization of a scsi_pkt structure + * - allocation of DMA resources for data transfer + * - reallocation of DMA resources for the next portion of the data transfer + */ static struct scsi_pkt * mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, int tgtlen, @@ -1265,7 +1799,7 @@ struct mrsas_instance *instance; struct scsi_pkt *new_pkt; - con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); instance = ADDR2MR(ap); @@ -1327,14 +1861,31 @@ return (pkt); } +/* + * tran_start - transport a SCSI command to the addressed target + * @ap: + * @pkt: + * + * The tran_start() entry point for a SCSI HBA driver is called to transport a + * SCSI command to the addressed target. The SCSI command is described + * entirely within the scsi_pkt structure, which the target driver allocated + * through the HBA driver's tran_init_pkt() entry point. If the command + * involves a data transfer, DMA resources must also have been allocated for + * the scsi_pkt structure. 
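/*
 * Aside: what the three tran_init_pkt() services above look like from
 * the target-driver side.  Illustrative scsi_init_pkt(9F) call; the
 * CDB/status lengths and no-wait callback policy are assumptions.
 */
static struct scsi_pkt *
example_alloc_pkt(struct scsi_address *ap, struct buf *bp)
{
        /*
         * One call can allocate the packet, bind DMA for bp, or both;
         * passing an existing pkt with a new bp rebinds DMA only.
         */
        return (scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
            sizeof (struct scsi_arq_status), 0, 0, NULL_FUNC, NULL));
}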
+ * + * Return Values : + * TRAN_BUSY - request queue is full, no more free scbs + * TRAN_ACCEPT - pkt has been submitted to the instance + */ static int mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt) { - uchar_t cmd_done = 0; + uchar_t cmd_done = 0; struct mrsas_instance *instance = ADDR2MR(ap); struct mrsas_cmd *cmd; + con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); if (instance->deadadapter == 1) { con_log(CL_ANN1, (CE_WARN, "mrsas_tran_start: return TRAN_FATAL_ERROR " @@ -1347,12 +1898,12 @@ } if (instance->adapterresetinprogress) { - con_log(CL_ANN1, (CE_NOTE, "Reset flag set, " + con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, " "returning mfi_pkt and setting TRAN_BUSY\n")); return (TRAN_BUSY); } - con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x", + con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x", __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time)); pkt->pkt_reason = CMD_CMPLT; @@ -1394,16 +1945,14 @@ /* Synchronize the Cmd frame for the controller */ (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV); - con_log(CL_ANN1, (CE_NOTE, "Push SCSI CDB[0]=0x%x" + con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x" "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index)); instance->func_ptr->issue_cmd(cmd, instance); } else { struct mrsas_header *hdr = &cmd->frame->hdr; - cmd->sync_cmd = MRSAS_TRUE; - - instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd); + instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd); pkt->pkt_reason = CMD_CMPLT; pkt->pkt_statistics = 0; @@ -1416,7 +1965,8 @@ break; case MFI_STAT_SCSI_DONE_WITH_ERROR: - + con_log(CL_ANN, (CE_CONT, + "mrsas_tran_start: scsi done with error")); pkt->pkt_reason = CMD_CMPLT; pkt->pkt_statistics = 0; @@ -1424,6 +1974,8 @@ break; case MFI_STAT_DEVICE_NOT_FOUND: + con_log(CL_ANN, (CE_CONT, + "mrsas_tran_start: device not found error")); pkt->pkt_reason = CMD_DEV_GONE; pkt->pkt_statistics = STAT_DISCON; break; @@ -1446,6 +1998,19 @@ return (TRAN_ACCEPT); } +/* + * tran_abort - Abort any commands that are currently in transport + * @ap: + * @pkt: + * + * The tran_abort() entry point for a SCSI HBA driver is called to abort any + * commands that are currently in transport for a particular target. This entry + * point is called when a target driver calls scsi_abort(). The tran_abort() + * entry point should attempt to abort the command denoted by the pkt + * parameter. If the pkt parameter is NULL, tran_abort() should attempt to + * abort all outstanding commands in the transport layer for the particular + * target or logical unit. + */ /*ARGSUSED*/ static int mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) @@ -1457,18 +2022,46 @@ return (DDI_FAILURE); } +/* + * tran_reset - reset either the SCSI bus or target + * @ap: + * @level: + * + * The tran_reset() entry point for a SCSI HBA driver is called to reset either + * the SCSI bus or a particular SCSI target device. This entry point is called + * when a target driver calls scsi_reset(). The tran_reset() entry point must + * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the + * particular target or logical unit must be reset. 
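/*
 * Aside, the tran_start() contract above reduced to its return codes.
 * The deadadapter/reset-in-progress checks mirror this patch; the
 * skeleton itself is a sketch.
 */
static int
example_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        struct mrsas_instance *inst = ADDR2MR(ap);

        if (inst->deadadapter == 1)
                return (TRAN_FATAL_ERROR);      /* controller is dead */
        if (inst->adapterresetinprogress)
                return (TRAN_BUSY);             /* target will retry */

        /* ... build the frame and hand it to the firmware ... */
        return (TRAN_ACCEPT);
}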
+ */ /*ARGSUSED*/ static int mrsas_tran_reset(struct scsi_address *ap, int level) { + struct mrsas_instance *instance = ADDR2MR(ap); + con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); - /* reset command not supported by H/W */ - - return (DDI_FAILURE); - + if (wait_for_outstanding(instance)) { + con_log(CL_ANN1, + (CE_CONT, "wait_for_outstanding: return FAIL.\n")); + return (DDI_FAILURE); + } else { + return (DDI_SUCCESS); + } } +/* + * tran_getcap - get one of a set of SCSA-defined capabilities + * @ap: + * @cap: + * @whom: + * + * The target driver can request the current setting of the capability for a + * particular target by setting the whom parameter to nonzero. A whom value of + * zero indicates a request for the current setting of the general capability + * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1 + * for undefined capabilities or the current value of the requested capability. + */ /*ARGSUSED*/ static int mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom) @@ -1477,7 +2070,7 @@ struct mrsas_instance *instance = ADDR2MR(ap); - con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); /* we do allow inquiring about capabilities for other targets */ if (cap == NULL) { @@ -1486,8 +2079,13 @@ switch (scsi_hba_lookup_capstr(cap)) { case SCSI_CAP_DMA_MAX: - /* Limit to 16MB max transfer */ - rval = mrsas_max_cap_maxxfer; + if (instance->tbolt) { + /* Limit to 256k max transfer */ + rval = mrsas_tbolt_max_cap_maxxfer; + } else { + /* Limit to 16MB max transfer */ + rval = mrsas_max_cap_maxxfer; + } break; case SCSI_CAP_MSG_OUT: rval = 1; @@ -1536,13 +2134,29 @@ return (rval); } +/* + * tran_setcap - set one of a set of SCSA-defined capabilities + * @ap: + * @cap: + * @value: + * @whom: + * + * The target driver might request that the new value be set for a particular + * target by setting the whom parameter to nonzero. A whom value of zero + * means that request is to set the new value for the SCSI bus or for adapter + * hardware in general. + * The tran_setcap() should return the following values as appropriate: + * - -1 for undefined capabilities + * - 0 if the HBA driver cannot set the capability to the requested value + * - 1 if the HBA driver is able to set the capability to the requested value + */ /*ARGSUSED*/ static int mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) { int rval = 1; - con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); /* We don't allow setting capabilities for other targets */ if (cap == NULL || whom == 0) { @@ -1584,12 +2198,25 @@ return (rval); } +/* + * tran_destroy_pkt - deallocate scsi_pkt structure + * @ap: + * @pkt: + * + * The tran_destroy_pkt() entry point is the HBA driver function that + * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is + * called when the target driver calls scsi_destroy_pkt(). The + * tran_destroy_pkt() entry point must free any DMA resources that have been + * allocated for the packet. An implicit DMA synchronization occurs if the + * DMA resources are freed and any cached data remains after the completion + * of the transfer. 
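/*
 * Aside, the chip-dependent DMA cap above in miniature: 2208 fast-path
 * commands are limited to 256k per transfer, the MFI path to 16MB.
 * Sketch; the two limit variables are the driver's own.
 */
static int
example_getcap_dma_max(struct mrsas_instance *inst)
{
        return (inst->tbolt ?
            mrsas_tbolt_max_cap_maxxfer :       /* 2208: 256k */
            mrsas_max_cap_maxxfer);             /* 2108: 16MB */
}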
+ */ static void mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) { struct scsa_cmd *acmd = PKT2CMD(pkt); - con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); if (acmd->cmd_flags & CFLAG_DMAVALID) { acmd->cmd_flags &= ~CFLAG_DMAVALID; @@ -1605,6 +2232,18 @@ scsi_hba_pkt_free(ap, pkt); } +/* + * tran_dmafree - deallocates DMA resources + * @ap: + * @pkt: + * + * The tran_dmafree() entry point deallocates DMAQ resources that have been + * allocated for a scsi_pkt structure. The tran_dmafree() entry point is + * called when the target driver calls scsi_dmafree(). The tran_dmafree() must + * free only DMA resources allocated for a scsi_pkt structure, not the + * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is + * implicitly performed. + */ /*ARGSUSED*/ static void mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) @@ -1624,6 +2263,19 @@ } } +/* + * tran_sync_pkt - synchronize the DMA object allocated + * @ap: + * @pkt: + * + * The tran_sync_pkt() entry point synchronizes the DMA object allocated for + * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt() + * entry point is called when the target driver calls scsi_sync_pkt(). If the + * data transfer direction is a DMA read from device to memory, tran_sync_pkt() + * must synchronize the CPU's view of the data. If the data transfer direction + * is a DMA write from memory to device, tran_sync_pkt() must synchronize the + * device's view of the data. + */ /*ARGSUSED*/ static void mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) @@ -1639,6 +2291,25 @@ } } +/*ARGSUSED*/ +static int +mrsas_tran_quiesce(dev_info_t *dip) +{ + con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + + return (1); +} + +/*ARGSUSED*/ +static int +mrsas_tran_unquiesce(dev_info_t *dip) +{ + con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + + return (1); +} + + /* * mrsas_isr(caddr_t) * @@ -1654,15 +2325,29 @@ uint32_t producer; uint32_t consumer; uint32_t context; + int retval; struct mrsas_cmd *cmd; struct mrsas_header *hdr; struct scsi_pkt *pkt; + con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); ASSERT(instance); - if ((instance->intr_type == DDI_INTR_TYPE_FIXED) && - !instance->func_ptr->intr_ack(instance)) { - return (DDI_INTR_UNCLAIMED); + if (instance->tbolt) { + mutex_enter(&instance->chip_mtx); + if ((instance->intr_type == DDI_INTR_TYPE_FIXED) && + !(instance->func_ptr->intr_ack(instance))) { + mutex_exit(&instance->chip_mtx); + return (DDI_INTR_UNCLAIMED); + } + retval = mr_sas_tbolt_process_outstanding_cmd(instance); + mutex_exit(&instance->chip_mtx); + return (retval); + } else { + if ((instance->intr_type == DDI_INTR_TYPE_FIXED) && + !instance->func_ptr->intr_ack(instance)) { + return (DDI_INTR_UNCLAIMED); + } } (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle, @@ -1681,7 +2366,7 @@ #ifdef OCRDEBUG if (debug_consecutive_timeout_after_ocr_g == 1) { con_log(CL_ANN1, (CE_NOTE, - "simulating consecutive timeout after ocr")); + "simulating consecutive timeout after ocr")); return (DDI_INTR_CLAIMED); } #endif @@ -1694,10 +2379,10 @@ consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle, instance->consumer); - con_log(CL_ANN1, (CE_NOTE, " producer %x consumer %x ", + con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ", producer, consumer)); if (producer == consumer) { - con_log(CL_ANN1, (CE_WARN, "producer = consumer 
case")); + con_log(CL_ANN, (CE_WARN, "producer == consumer case")); DTRACE_PROBE2(isr_pc_err, uint32_t, producer, uint32_t, consumer); mutex_exit(&instance->cmd_pend_mtx); @@ -1711,10 +2396,10 @@ cmd = instance->cmd_list[context]; if (cmd->sync_cmd == MRSAS_TRUE) { - hdr = (struct mrsas_header *)&cmd->frame->hdr; - if (hdr) { - mlist_del_init(&cmd->list); - } + hdr = (struct mrsas_header *)&cmd->frame->hdr; + if (hdr) { + mlist_del_init(&cmd->list); + } } else { pkt = cmd->pkt; if (pkt) { @@ -1761,9 +2446,9 @@ /* * ************************************************************************** * - * * - * libraries * - * * + * * + * libraries * + * * * ************************************************************************** * */ /* @@ -1779,11 +2464,10 @@ static struct mrsas_cmd * get_mfi_pkt(struct mrsas_instance *instance) { - mlist_t *head = &instance->cmd_pool_list; + mlist_t *head = &instance->cmd_pool_list; struct mrsas_cmd *cmd = NULL; mutex_enter(&instance->cmd_pool_mtx); - ASSERT(mutex_owned(&instance->cmd_pool_mtx)); if (!mlist_empty(head)) { cmd = mlist_entry(head->next, struct mrsas_cmd, list); @@ -1793,6 +2477,7 @@ cmd->pkt = NULL; cmd->retry_count_for_ocr = 0; cmd->drv_pkt_time = 0; + } mutex_exit(&instance->cmd_pool_mtx); @@ -1806,14 +2491,17 @@ struct mrsas_cmd *cmd = NULL; mutex_enter(&instance->app_cmd_pool_mtx); - ASSERT(mutex_owned(&instance->app_cmd_pool_mtx)); if (!mlist_empty(head)) { cmd = mlist_entry(head->next, struct mrsas_cmd, list); mlist_del_init(head->next); } - if (cmd != NULL) + if (cmd != NULL) { cmd->pkt = NULL; + cmd->retry_count_for_ocr = 0; + cmd->drv_pkt_time = 0; + } + mutex_exit(&instance->app_cmd_pool_mtx); return (cmd); @@ -1825,7 +2513,6 @@ return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) { mutex_enter(&instance->cmd_pool_mtx); - ASSERT(mutex_owned(&instance->cmd_pool_mtx)); /* use mlist_add_tail for debug assistance */ mlist_add_tail(&cmd->list, &instance->cmd_pool_list); @@ -1836,20 +2523,18 @@ return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) { mutex_enter(&instance->app_cmd_pool_mtx); - ASSERT(mutex_owned(&instance->app_cmd_pool_mtx)); mlist_add(&cmd->list, &instance->app_cmd_pool_list); mutex_exit(&instance->app_cmd_pool_mtx); } -static void +void push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) { struct scsi_pkt *pkt; struct mrsas_header *hdr; - con_log(CL_ANN1, (CE_NOTE, "push_pending_pkt(): Called\n")); + con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n")); mutex_enter(&instance->cmd_pend_mtx); - ASSERT(mutex_owned(&instance->cmd_pend_mtx)); mlist_del_init(&cmd->list); mlist_add_tail(&cmd->list, &instance->cmd_pend_list); if (cmd->sync_cmd == MRSAS_TRUE) { @@ -1861,15 +2546,15 @@ "time %llx", (void *)cmd, cmd->index, gethrtime())); - /* Wait for specified interval */ + /* Wait for specified interval */ cmd->drv_pkt_time = ddi_get16( cmd->frame_dma_obj.acc_handle, &hdr->timeout); if (cmd->drv_pkt_time < debug_timeout_g) cmd->drv_pkt_time = (uint16_t)debug_timeout_g; - con_log(CL_ANN1, (CE_CONT, - "push_pending_pkt(): " - "Called IO Timeout Value %x\n", - cmd->drv_pkt_time)); + con_log(CL_ANN1, (CE_CONT, + "push_pending_pkt(): " + "Called IO Timeout Value %x\n", + cmd->drv_pkt_time)); } if (hdr && instance->timeout_id == (timeout_id_t)-1) { instance->timeout_id = timeout(io_timeout_checker, @@ -1893,9 +2578,10 @@ } mutex_exit(&instance->cmd_pend_mtx); + } -static int +int mrsas_print_pending_cmds(struct mrsas_instance *instance) { mlist_t *head = 
&instance->cmd_pend_list; @@ -1903,47 +2589,72 @@ struct mrsas_cmd *cmd = NULL; struct mrsas_header *hdr; unsigned int flag = 1; - struct scsi_pkt *pkt; - con_log(CL_ANN1, (CE_NOTE, - "mrsas_print_pending_cmds(): Called")); + int saved_level; + int cmd_count = 0; + + saved_level = debug_level_g; + debug_level_g = CL_ANN1; + + cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n"); + while (flag) { mutex_enter(&instance->cmd_pend_mtx); tmp = tmp->next; if (tmp == head) { mutex_exit(&instance->cmd_pend_mtx); flag = 0; + con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():" + " NO MORE CMDS PENDING....\n")); break; } else { cmd = mlist_entry(tmp, struct mrsas_cmd, list); mutex_exit(&instance->cmd_pend_mtx); if (cmd) { if (cmd->sync_cmd == MRSAS_TRUE) { - hdr = (struct mrsas_header *)&cmd->frame->hdr; + hdr = (struct mrsas_header *) + &cmd->frame->hdr; if (hdr) { - con_log(CL_ANN1, (CE_CONT, - "print: cmd %p index %x hdr %p", - (void *)cmd, cmd->index, - (void *)hdr)); + con_log(CL_ANN1, (CE_CONT, + "print: cmd %p index 0x%x " + "drv_pkt_time 0x%x (NO-PKT)" + " hdr %p\n", (void *)cmd, + cmd->index, + cmd->drv_pkt_time, + (void *)hdr)); } } else { pkt = cmd->pkt; if (pkt) { con_log(CL_ANN1, (CE_CONT, - "print: cmd %p index %x " - "pkt %p", (void *)cmd, cmd->index, - (void *)pkt)); + "print: cmd %p index 0x%x " + "drv_pkt_time 0x%x pkt %p \n", + (void *)cmd, cmd->index, + cmd->drv_pkt_time, (void *)pkt)); } } + + if (++cmd_count == 1) { + mrsas_print_cmd_details(instance, cmd, + 0xDD); + } else { + mrsas_print_cmd_details(instance, cmd, + 1); + } + } } } - con_log(CL_ANN1, (CE_NOTE, "mrsas_print_pending_cmds(): Done\n")); + con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n")); + + + debug_level_g = saved_level; + return (DDI_SUCCESS); } -static int +int mrsas_complete_pending_cmds(struct mrsas_instance *instance) { @@ -1968,7 +2679,7 @@ = CMD_DEV_GONE; pkt->pkt_statistics = STAT_DISCON; - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "fail and posting to scsa " "cmd %p index %x" " pkt %p " @@ -1980,7 +2691,7 @@ } else { /* for DCMDS */ if (cmd->sync_cmd == MRSAS_TRUE) { hdr = (struct mrsas_header *)&cmd->frame->hdr; - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "posting invalid status to application " "cmd %p index %x" " hdr %p " @@ -1993,22 +2704,92 @@ } mlist_del_init(&cmd->list); } else { - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds:" "NULL command\n")); } - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds:" "looping for more commands\n")); } mutex_exit(&instance->cmd_pend_mtx); - con_log(CL_ANN1, (CE_NOTE, "mrsas_complete_pending_cmds(): DONE\n")); + con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n")); return (DDI_SUCCESS); } - -static int +void +mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd, + int detail) +{ + struct scsi_pkt *pkt = cmd->pkt; + Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request; + int i; + int saved_level; + ddi_acc_handle_t acc_handle = + instance->mpi2_frame_pool_dma_obj.acc_handle; + + if (detail == 0xDD) { + saved_level = debug_level_g; + debug_level_g = CL_ANN1; + } + + + if (instance->tbolt) { + con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p " + "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n", + (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time)); + } else { + con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p " + "cmd->index 0x%x timer 0x%x sec\n", + (void *)cmd, cmd->index, 
cmd->drv_pkt_time)); + } + + if (pkt) { + con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x", + pkt->pkt_cdbp[0])); + } else { + con_log(CL_ANN1, (CE_CONT, "NO-PKT")); + } + + if ((detail == 0xDD) && instance->tbolt) { + con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n")); + con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X " + "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n", + ddi_get16(acc_handle, &scsi_io->DevHandle), + ddi_get8(acc_handle, &scsi_io->Function), + ddi_get16(acc_handle, &scsi_io->IoFlags), + ddi_get16(acc_handle, &scsi_io->SGLFlags), + ddi_get32(acc_handle, &scsi_io->DataLength))); + + for (i = 0; i < 32; i++) { + con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i, + ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i]))); + } + + con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n")); + con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X " + "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X " + "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64 + " regLockLength=0x%X spanArm=0x%X\n", + ddi_get8(acc_handle, &scsi_io->RaidContext.status), + ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus), + ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId), + ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue), + ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags), + ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags), + ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA), + ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength), + ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm))); + } + + if (detail == 0xDD) { + debug_level_g = saved_level; + } +} + + +int mrsas_issue_pending_cmds(struct mrsas_instance *instance) { mlist_t *head = &instance->cmd_pend_list; @@ -2023,53 +2804,79 @@ tmp = tmp->next; mutex_exit(&instance->cmd_pend_mtx); if (cmd) { - con_log(CL_ANN1, (CE_NOTE, - "mrsas_issue_pending_cmds(): " - "Got a cmd: cmd:%p\n", (void *)cmd)); - cmd->retry_count_for_ocr++; - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): " - "cmd retry count = %d\n", - cmd->retry_count_for_ocr)); + "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ", + (void *)cmd, cmd->index, cmd->drv_pkt_time)); + + /* Reset command timeout value */ + if (cmd->drv_pkt_time < debug_timeout_g) + cmd->drv_pkt_time = (uint16_t)debug_timeout_g; + + cmd->retry_count_for_ocr++; + + cmn_err(CE_CONT, "cmd retry count = %d\n", + cmd->retry_count_for_ocr); + if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) { - con_log(CL_ANN1, (CE_NOTE, + cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): " + "cmd->retry_count exceeded limit >%d\n", + IO_RETRY_COUNT); + mrsas_print_cmd_details(instance, cmd, 0xDD); + + cmn_err(CE_WARN, "mrsas_issue_pending_cmds():" - "Calling Kill Adapter\n")); - (void) mrsas_kill_adapter(instance); + "Calling KILL Adapter\n"); + if (instance->tbolt) + mrsas_tbolt_kill_adapter(instance); + else + (void) mrsas_kill_adapter(instance); return (DDI_FAILURE); } + pkt = cmd->pkt; if (pkt) { - con_log(CL_ANN1, (CE_NOTE, - "PENDING ISSUE: cmd %p index %x " + con_log(CL_ANN1, (CE_CONT, + "PENDING PKT-CMD ISSUE: cmd %p index %x " "pkt %p time %llx", (void *)cmd, cmd->index, (void *)pkt, gethrtime())); + } else { + cmn_err(CE_CONT, + "mrsas_issue_pending_cmds(): NO-PKT, " + "cmd %p index 0x%x drv_pkt_time 0x%x ", + (void *)cmd, cmd->index, cmd->drv_pkt_time); } + + if (cmd->sync_cmd == MRSAS_TRUE) { + cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): " + "SYNC_CMD == TRUE \n"); instance->func_ptr->issue_cmd_in_sync_mode( instance, cmd); } else { instance->func_ptr->issue_cmd(cmd, 
instance); } } else { - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds: NULL command\n")); } - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds:" "looping for more commands")); } - con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): DONE\n")); + con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n")); return (DDI_SUCCESS); } + + /* * destroy_mfi_frame_pool */ -static void +void destroy_mfi_frame_pool(struct mrsas_instance *instance) { int i; @@ -2078,7 +2885,8 @@ struct mrsas_cmd *cmd; /* return all frames to pool */ - for (i = 0; i < max_cmd+1; i++) { + + for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; @@ -2093,7 +2901,7 @@ /* * create_mfi_frame_pool */ -static int +int create_mfi_frame_pool(struct mrsas_instance *instance) { int i = 0; @@ -2103,11 +2911,10 @@ uint32_t sgl_sz; uint32_t tot_frame_size; struct mrsas_cmd *cmd; + int retval = DDI_SUCCESS; max_cmd = instance->max_fw_cmds; - sge_sz = sizeof (struct mrsas_sge_ieee); - /* calculated the number of 64byte frames required for SGL */ sgl_sz = sge_sz * instance->max_num_sge; tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH; @@ -2115,7 +2922,7 @@ con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: " "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size)); - while (i < max_cmd+1) { + while (i < max_cmd) { cmd = instance->cmd_list[i]; cmd->frame_dma_obj.size = tot_frame_size; @@ -2125,14 +2932,14 @@ cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1; cmd->frame_dma_obj.dma_attr.dma_attr_align = 64; - cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj, (uchar_t)DDI_STRUCTURE_LE_ACC); if (cookie_cnt == -1 || cookie_cnt > 1) { - con_log(CL_ANN, (CE_WARN, - "create_mfi_frame_pool: could not alloc.")); - return (DDI_FAILURE); + cmn_err(CE_WARN, + "create_mfi_frame_pool: could not alloc."); + retval = DDI_FAILURE; + goto mrsas_undo_frame_pool; } bzero(cmd->frame_dma_obj.buffer, tot_frame_size); @@ -2150,10 +2957,10 @@ tot_frame_size - SENSE_LENGTH; if (!cmd->frame || !cmd->sense) { - con_log(CL_ANN, (CE_NOTE, - "mr_sas: pci_pool_alloc failed")); - - return (ENOMEM); + cmn_err(CE_WARN, + "mr_sas: pci_pool_alloc failed"); + retval = ENOMEM; + goto mrsas_undo_frame_pool; } ddi_put32(cmd->frame_dma_obj.acc_handle, @@ -2165,6 +2972,12 @@ } return (DDI_SUCCESS); + +mrsas_undo_frame_pool: + if (i > 0) + destroy_mfi_frame_pool(instance); + + return (retval); } /* @@ -2207,8 +3020,8 @@ if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj, (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { - con_log(CL_ANN, (CE_WARN, - "mr_sas: could not alloc reply queue")); + cmn_err(CE_WARN, + "mr_sas: could not alloc reply queue"); return (DDI_FAILURE); } @@ -2240,9 +3053,9 @@ if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj, (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { - con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: " - "could not allocate data transfer buffer.")); - return (DDI_FAILURE); + cmn_err(CE_WARN, "alloc_additional_dma_buffer: " + "could not allocate data transfer buffer."); + goto mrsas_undo_internal_buff; } bzero(instance->mfi_evt_detail_obj.buffer, @@ -2251,44 +3064,163 @@ instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED; return (DDI_SUCCESS); + +mrsas_undo_internal_buff: + if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { + (void) mrsas_free_dma_obj(instance, + instance->mfi_internal_dma_obj); + instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; + } + + return (DDI_FAILURE); } + +void 
+mrsas_free_cmd_pool(struct mrsas_instance *instance) +{ + int i; + uint32_t max_cmd; + size_t sz; + + /* already freed */ + if (instance->cmd_list == NULL) { + return; + } + + max_cmd = instance->max_fw_cmds; + + /* size of cmd_list array */ + sz = sizeof (struct mrsas_cmd *) * max_cmd; + + /* First free each cmd */ + for (i = 0; i < max_cmd; i++) { + if (instance->cmd_list[i] != NULL) { + kmem_free(instance->cmd_list[i], + sizeof (struct mrsas_cmd)); + } + + instance->cmd_list[i] = NULL; + } + + /* Now, free cmd_list array */ + if (instance->cmd_list != NULL) + kmem_free(instance->cmd_list, sz); + + instance->cmd_list = NULL; + + INIT_LIST_HEAD(&instance->cmd_pool_list); + INIT_LIST_HEAD(&instance->cmd_pend_list); + if (instance->tbolt) { + INIT_LIST_HEAD(&instance->cmd_app_pool_list); + } else { + INIT_LIST_HEAD(&instance->app_cmd_pool_list); + } + +} + + +/* + * mrsas_alloc_cmd_pool + */ +int +mrsas_alloc_cmd_pool(struct mrsas_instance *instance) +{ + int i; + int count; + uint32_t max_cmd; + uint32_t reserve_cmd; + size_t sz; + + struct mrsas_cmd *cmd; + + max_cmd = instance->max_fw_cmds; + con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: " + "max_cmd %x", max_cmd)); + + + sz = sizeof (struct mrsas_cmd *) * max_cmd; + + /* + * instance->cmd_list is an array of struct mrsas_cmd pointers. + * Allocate the dynamic array first and then allocate individual + * commands. + */ + instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); + ASSERT(instance->cmd_list); + + /* create a frame pool and assign one frame to each cmd */ + for (count = 0; count < max_cmd; count++) { + instance->cmd_list[count] = + kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP); + ASSERT(instance->cmd_list[count]); + } + + /* add all the commands to command pool */ + + INIT_LIST_HEAD(&instance->cmd_pool_list); + INIT_LIST_HEAD(&instance->cmd_pend_list); + INIT_LIST_HEAD(&instance->app_cmd_pool_list); + + reserve_cmd = MRSAS_APP_RESERVED_CMDS; + + for (i = 0; i < reserve_cmd; i++) { + cmd = instance->cmd_list[i]; + cmd->index = i; + mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list); + } + + + for (i = reserve_cmd; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + cmd->index = i; + mlist_add_tail(&cmd->list, &instance->cmd_pool_list); + } + + return (DDI_SUCCESS); + +mrsas_undo_cmds: + if (count > 0) { + /* free each cmd */ + for (i = 0; i < count; i++) { + if (instance->cmd_list[i] != NULL) { + kmem_free(instance->cmd_list[i], + sizeof (struct mrsas_cmd)); + } + instance->cmd_list[i] = NULL; + } + } + +mrsas_undo_cmd_list: + if (instance->cmd_list != NULL) + kmem_free(instance->cmd_list, sz); + instance->cmd_list = NULL; + + return (DDI_FAILURE); +} + + /* * free_space_for_mfi */ static void free_space_for_mfi(struct mrsas_instance *instance) { - int i; - uint32_t max_cmd = instance->max_fw_cmds; /* already freed */ if (instance->cmd_list == NULL) { return; } + /* Free additional dma buffer */ free_additional_dma_buffer(instance); - /* first free the MFI frame pool */ + /* Free the MFI frame pool */ destroy_mfi_frame_pool(instance); - /* free all the commands in the cmd_list */ - for (i = 0; i < instance->max_fw_cmds+1; i++) { - kmem_free(instance->cmd_list[i], - sizeof (struct mrsas_cmd)); - - instance->cmd_list[i] = NULL; - } - - /* free the cmd_list buffer itself */ - kmem_free(instance->cmd_list, - sizeof (struct mrsas_cmd *) * (max_cmd+1)); - - instance->cmd_list = NULL; - - INIT_LIST_HEAD(&instance->cmd_pool_list); - INIT_LIST_HEAD(&instance->app_cmd_pool_list); - INIT_LIST_HEAD(&instance->cmd_pend_list); + /* 
Free all the commands in the cmd_list */ + /* Free the cmd_list buffer itself */ + mrsas_free_cmd_pool(instance); } /* @@ -2297,72 +3229,37 @@ static int alloc_space_for_mfi(struct mrsas_instance *instance) { - int i; - uint32_t max_cmd; - uint32_t reserve_cmd; - size_t sz; - - struct mrsas_cmd *cmd; - - max_cmd = instance->max_fw_cmds; - - /* reserve 1 more slot for flush_cache */ - sz = sizeof (struct mrsas_cmd *) * (max_cmd+1); - - /* - * instance->cmd_list is an array of struct mrsas_cmd pointers. - * Allocate the dynamic array first and then allocate individual - * commands. - */ - instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); - ASSERT(instance->cmd_list); - - for (i = 0; i < max_cmd+1; i++) { - instance->cmd_list[i] = kmem_zalloc(sizeof (struct mrsas_cmd), - KM_SLEEP); - ASSERT(instance->cmd_list[i]); - } - - INIT_LIST_HEAD(&instance->cmd_pool_list); - INIT_LIST_HEAD(&instance->cmd_pend_list); - /* add all the commands to command pool (instance->cmd_pool) */ - reserve_cmd = APP_RESERVE_CMDS; - INIT_LIST_HEAD(&instance->app_cmd_pool_list); - for (i = 0; i < reserve_cmd-1; i++) { - cmd = instance->cmd_list[i]; - cmd->index = i; - mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list); - } - /* - * reserve slot instance->cmd_list[APP_RESERVE_CMDS-1] - * for abort_aen_cmd - */ - for (i = reserve_cmd; i < max_cmd; i++) { - cmd = instance->cmd_list[i]; - cmd->index = i; - mlist_add_tail(&cmd->list, &instance->cmd_pool_list); - } - - /* single slot for flush_cache won't be added in command pool */ - cmd = instance->cmd_list[max_cmd]; - cmd->index = i; - - /* create a frame pool and assign one frame to each cmd */ + /* Allocate command pool (memory for cmd_list & individual commands) */ + if (mrsas_alloc_cmd_pool(instance)) { + cmn_err(CE_WARN, "error creating cmd pool"); + return (DDI_FAILURE); + } + + /* Allocate MFI Frame pool */ if (create_mfi_frame_pool(instance)) { - con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool")); - return (DDI_FAILURE); - } - - /* create a frame pool and assign one frame to each cmd */ + cmn_err(CE_WARN, "error creating frame DMA pool"); + goto mfi_undo_cmd_pool; + } + + /* Allocate additional DMA buffer */ if (alloc_additional_dma_buffer(instance)) { - con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool")); - return (DDI_FAILURE); + cmn_err(CE_WARN, "error creating frame DMA pool"); + goto mfi_undo_frame_pool; } return (DDI_SUCCESS); + +mfi_undo_frame_pool: + destroy_mfi_frame_pool(instance); + +mfi_undo_cmd_pool: + mrsas_free_cmd_pool(instance); + + return (DDI_FAILURE); } + /* * get_ctrl_info */ @@ -2376,7 +3273,11 @@ struct mrsas_dcmd_frame *dcmd; struct mrsas_ctrl_info *ci; - cmd = get_mfi_pkt(instance); + if (instance->tbolt) { + cmd = get_raid_msg_mfi_pkt(instance); + } else { + cmd = get_mfi_pkt(instance); + } if (!cmd) { con_log(CL_ANN, (CE_WARN, @@ -2385,7 +3286,7 @@ uint16_t, instance->max_fw_cmds); return (DDI_FAILURE); } - cmd->retry_count_for_ocr = 0; + /* Clear the frame buffer and assign back the context id */ (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, @@ -2396,8 +3297,8 @@ ci = (struct mrsas_ctrl_info *)instance->internal_buf; if (!ci) { - con_log(CL_ANN, (CE_WARN, - "Failed to alloc mem for ctrl info")); + cmn_err(CE_WARN, + "Failed to alloc mem for ctrl info"); return_mfi_pkt(instance, cmd); return (DDI_FAILURE); } @@ -2425,33 +3326,40 @@ cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + 
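Throughout this patch, the legacy MFI management commands (get_ctrl_info, abort_aen_cmd, get_seq_num, flush_cache, and the ioctl paths) are reused on 2208/Thunderbolt controllers by drawing the packet from the RAID-message pool and wrapping the MFI frame in a Fusion request via mr_sas_tbolt_build_mfi_cmd() before issue. A minimal user-space sketch of that dispatch pattern follows; only the function names come from the patch, the structs and stub bodies are illustrative:

#include <stdio.h>

struct instance { int tbolt; };
struct cmd { int index; };

static struct cmd pool_cmd;

/* Stubs standing in for the driver's packet-pool accessors. */
static struct cmd *get_raid_msg_mfi_pkt(struct instance *ip) { (void)ip; return (&pool_cmd); }
static struct cmd *get_mfi_pkt(struct instance *ip) { (void)ip; return (&pool_cmd); }
static void mr_sas_tbolt_build_mfi_cmd(struct instance *ip, struct cmd *c) { (void)ip; (void)c; }

static struct cmd *
get_mgmt_pkt(struct instance *ip)
{
        /* Thunderbolt draws from the RAID-message pool, legacy from the MFI pool. */
        return (ip->tbolt ? get_raid_msg_mfi_pkt(ip) : get_mfi_pkt(ip));
}

static void
issue_mgmt_pkt(struct instance *ip, struct cmd *c)
{
        /* On 2208 the MFI frame must first be wrapped in a Fusion request. */
        if (ip->tbolt)
                mr_sas_tbolt_build_mfi_cmd(ip, c);
        (void) printf("issue cmd %d (tbolt=%d)\n", c->index, ip->tbolt);
}

int
main(void)
{
        struct instance inst = { .tbolt = 1 };

        issue_mgmt_pkt(&inst, get_mgmt_pkt(&inst));
        return (0);
}

The same symmetry holds on completion: Thunderbolt packets go back through return_raid_msg_mfi_pkt(), legacy ones through return_mfi_pkt().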
if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { - ret = 0; - - ctrl_info->max_request_size = ddi_get32( - cmd->frame_dma_obj.acc_handle, &ci->max_request_size); - - ctrl_info->ld_present_count = ddi_get16( - cmd->frame_dma_obj.acc_handle, &ci->ld_present_count); - - ctrl_info->properties.on_off_properties = - ddi_get32(cmd->frame_dma_obj.acc_handle, - &ci->properties.on_off_properties); - - ddi_rep_get8(cmd->frame_dma_obj.acc_handle, - (uint8_t *)(ctrl_info->product_name), - (uint8_t *)(ci->product_name), 80 * sizeof (char), - DDI_DEV_AUTOINCR); - /* should get more members of ci with ddi_get when needed */ + ret = 0; + + ctrl_info->max_request_size = ddi_get32( + cmd->frame_dma_obj.acc_handle, &ci->max_request_size); + + ctrl_info->ld_present_count = ddi_get16( + cmd->frame_dma_obj.acc_handle, &ci->ld_present_count); + + ctrl_info->properties.on_off_properties = ddi_get32( + cmd->frame_dma_obj.acc_handle, + &ci->properties.on_off_properties); + ddi_rep_get8(cmd->frame_dma_obj.acc_handle, + (uint8_t *)(ctrl_info->product_name), + (uint8_t *)(ci->product_name), 80 * sizeof (char), + DDI_DEV_AUTOINCR); + /* should get more members of ci with ddi_get when needed */ } else { - con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed")); + cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed"); ret = -1; } if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { ret = -1; } - return_mfi_pkt(instance, cmd); + if (instance->tbolt) { + return_raid_msg_mfi_pkt(instance, cmd); + } else { + return_mfi_pkt(instance, cmd); + } return (ret); } @@ -2468,7 +3376,13 @@ struct mrsas_cmd *cmd; struct mrsas_abort_frame *abort_fr; - cmd = instance->cmd_list[APP_RESERVE_CMDS-1]; + con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__)); + + if (instance->tbolt) { + cmd = get_raid_msg_mfi_pkt(instance); + } else { + cmd = get_mfi_pkt(instance); + } if (!cmd) { con_log(CL_ANN1, (CE_WARN, @@ -2477,7 +3391,7 @@ uint16_t, instance->max_fw_cmds); return (DDI_FAILURE); } - cmd->retry_count_for_ocr = 0; + /* Clear the frame buffer and assign back the context id */ (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, @@ -2500,9 +3414,12 @@ instance->aen_cmd->abort_aen = 1; - cmd->sync_cmd = MRSAS_TRUE; cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { con_log(CL_ANN1, (CE_WARN, "abort_aen_cmd: issue_cmd_in_poll_mode failed")); @@ -2514,49 +3431,27 @@ instance->aen_cmd->abort_aen = 1; instance->aen_cmd = 0; + if (instance->tbolt) { + return_raid_msg_mfi_pkt(instance, cmd); + } else { + return_mfi_pkt(instance, cmd); + } + atomic_add_16(&instance->fw_outstanding, (-1)); return (ret); } -/* - * init_mfi - */ static int -init_mfi(struct mrsas_instance *instance) +mrsas_build_init_cmd(struct mrsas_instance *instance, + struct mrsas_cmd **cmd_ptr) { struct mrsas_cmd *cmd; - struct mrsas_ctrl_info ctrl_info; struct mrsas_init_frame *init_frame; struct mrsas_init_queue_info *initq_info; - - /* we expect the FW state to be READY */ - if (mfi_state_transition_to_ready(instance)) { - con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready")); - goto fail_ready_state; - } - - /* get various operational parameters from status register */ - instance->max_num_sge = - (instance->func_ptr->read_fw_status_reg(instance) & - 0xFF0000) >> 0x10; - /* - * Reduce the max supported cmds by 1. 
This is to ensure that the - * reply_q_sz (1 more than the max cmd that driver may send) - * does not exceed max cmds that the FW can support - */ - instance->max_fw_cmds = - instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; - instance->max_fw_cmds = instance->max_fw_cmds - 1; - - instance->max_num_sge = - (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ? - MRSAS_MAX_SGE_CNT : instance->max_num_sge; - - /* create a pool of commands */ - if (alloc_space_for_mfi(instance) != DDI_SUCCESS) - goto fail_alloc_fw_space; + struct mrsas_drv_ver drv_ver_info; + /* * Prepare a init frame. Note the init frame points to queue info @@ -2564,8 +3459,8 @@ * this frame - since we don't need any SGL - we use SGL's space as * queue info structure */ - cmd = get_mfi_pkt(instance); - cmd->retry_count_for_ocr = 0; + cmd = *cmd_ptr; + /* Clear the frame buffer and assign back the context id */ (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); @@ -2613,22 +3508,87 @@ ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->queue_info_new_phys_addr_hi, 0); + + /* fill driver version information */ + fill_up_drv_ver(&drv_ver_info); + + /* allocate the driver version data transfer buffer */ + instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver); + instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr; + instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; + instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1; + instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1; + + if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + con_log(CL_ANN, (CE_WARN, + "init_mfi : Could not allocate driver version buffer.")); + return (DDI_FAILURE); + } + /* copy driver version to dma buffer */ + (void) memset(instance->drv_ver_dma_obj.buffer, 0, + sizeof (drv_ver_info.drv_ver)); + ddi_rep_put8(cmd->frame_dma_obj.acc_handle, + (uint8_t *)drv_ver_info.drv_ver, + (uint8_t *)instance->drv_ver_dma_obj.buffer, + sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR); + + + /* copy driver version physical address to init frame */ + ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion, + instance->drv_ver_dma_obj.dma_cookie[0].dmac_address); + ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, sizeof (struct mrsas_init_queue_info)); cmd->frame_count = 1; - /* issue the init frame in polled mode */ + *cmd_ptr = cmd; + + return (DDI_SUCCESS); +} + + +/* + * mrsas_init_adapter_ppc - Initialize MFI interface adapter. 
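Note that the INIT path built above also advertises the driver version to firmware: fill_up_drv_ver() formats the version strings, they are copied into a byte-aligned DMA buffer, and that buffer's physical address is written into the init frame's driverversion field. A user-space sketch of the formatting and copy step; the field sizes and the version literal are assumptions for the demo, not taken from the driver headers:

#include <stdio.h>
#include <string.h>

#define MRSAS_VERSION "6.503.00.00"     /* assumed value for the demo */

struct drv_ver {                        /* illustrative layout */
        char drv_name[12];
        char drv_ver[32];
};

static void
fill_up_drv_ver_sketch(struct drv_ver *dv)
{
        (void) memset(dv, 0, sizeof (*dv));
        (void) snprintf(dv->drv_name, sizeof (dv->drv_name), "%s", "mr_sas");
        (void) snprintf(dv->drv_ver, sizeof (dv->drv_ver), "%s", MRSAS_VERSION);
}

int
main(void)
{
        struct drv_ver dv;
        char dma_buf[sizeof (dv.drv_ver)];      /* stands in for the DMA buffer */

        fill_up_drv_ver_sketch(&dv);
        /* analogue of the ddi_rep_put8() copy into the DMA object */
        (void) memcpy(dma_buf, dv.drv_ver, sizeof (dma_buf));
        (void) printf("version handed to FW: %s\n", dma_buf);
        return (0);
}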
+ */ +int +mrsas_init_adapter_ppc(struct mrsas_instance *instance) +{ + struct mrsas_cmd *cmd; + + /* + * allocate memory for mfi adapter(cmd pool, individual commands, mfi + * frames etc + */ + if (alloc_space_for_mfi(instance) != DDI_SUCCESS) { + con_log(CL_ANN, (CE_NOTE, + "Error, failed to allocate memory for MFI adapter")); + return (DDI_FAILURE); + } + + /* Build INIT command */ + cmd = get_mfi_pkt(instance); + + if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) { + con_log(CL_ANN, + (CE_NOTE, "Error, failed to build INIT command")); + + goto fail_undo_alloc_mfi_space; + } + + /* + * Disable interrupt before sending init frame ( see linux driver code) + * send INIT MFI frame in polled mode + */ if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { con_log(CL_ANN, (CE_WARN, "failed to init firmware")); - return_mfi_pkt(instance, cmd); goto fail_fw_init; } - if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { - return_mfi_pkt(instance, cmd); + if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) goto fail_fw_init; - } return_mfi_pkt(instance, cmd); if (ctio_enable && @@ -2639,8 +3599,67 @@ instance->flag_ieee = 0; } + instance->unroll.alloc_space_mfi = 1; + instance->unroll.verBuff = 1; + + return (DDI_SUCCESS); + + +fail_fw_init: + (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj); + +fail_undo_alloc_mfi_space: + return_mfi_pkt(instance, cmd); + free_space_for_mfi(instance); + + return (DDI_FAILURE); + +} + +/* + * mrsas_init_adapter - Initialize adapter. + */ +int +mrsas_init_adapter(struct mrsas_instance *instance) +{ + struct mrsas_ctrl_info ctrl_info; + + + /* we expect the FW state to be READY */ + if (mfi_state_transition_to_ready(instance)) { + con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready")); + return (DDI_FAILURE); + } + + /* get various operational parameters from status register */ + instance->max_num_sge = + (instance->func_ptr->read_fw_status_reg(instance) & + 0xFF0000) >> 0x10; + instance->max_num_sge = + (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ? + MRSAS_MAX_SGE_CNT : instance->max_num_sge; + + /* + * Reduce the max supported cmds by 1. 
This is to ensure that the + * reply_q_sz (1 more than the max cmd that driver may send) + * does not exceed max cmds that the FW can support + */ + instance->max_fw_cmds = + instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; + instance->max_fw_cmds = instance->max_fw_cmds - 1; + + + + /* Initialize adapter */ + if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) { + con_log(CL_ANN, + (CE_WARN, "mr_sas: could not initialize adapter")); + return (DDI_FAILURE); + } + + /* gather misc FW related information */ instance->disable_online_ctrl_reset = 0; - /* gather misc FW related information */ + if (!get_ctrl_info(instance, &ctrl_info)) { instance->max_sectors_per_req = ctrl_info.max_request_size; con_log(CL_ANN1, (CE_NOTE, @@ -2656,23 +3675,10 @@ return (DDI_SUCCESS); -fail_fw_init: -fail_alloc_fw_space: - - free_space_for_mfi(instance); - -fail_ready_state: - ddi_regs_map_free(&instance->regmap_handle); - -fail_mfi_reg_setup: - return (DDI_FAILURE); } - - - static int mrsas_issue_init_mfi(struct mrsas_instance *instance) { @@ -2691,7 +3697,7 @@ cmd = get_mfi_app_pkt(instance); if (!cmd) { - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_WARN, "mrsas_issue_init_mfi: get_pkt failed\n")); return (DDI_FAILURE); } @@ -2753,8 +3759,15 @@ return_mfi_app_pkt(instance, cmd); return (DDI_FAILURE); } + + if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { + return_mfi_pkt(instance, cmd); + return (DDI_FAILURE); + } + return_mfi_app_pkt(instance, cmd); - con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_init_mfi: Done")); + con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done")); + return (DDI_SUCCESS); } /* @@ -2762,31 +3775,32 @@ * * @reg_set : MFI register set */ -static int +int mfi_state_transition_to_ready(struct mrsas_instance *instance) { int i; uint8_t max_wait; - uint32_t fw_ctrl; + uint32_t fw_ctrl = 0; uint32_t fw_state; uint32_t cur_state; uint32_t cur_abs_reg_val; uint32_t prev_abs_reg_val; + uint32_t status; cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance); fw_state = cur_abs_reg_val & MFI_STATE_MASK; - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); while (fw_state != MFI_STATE_READY) { - con_log(CL_ANN, (CE_NOTE, + con_log(CL_ANN, (CE_CONT, "mfi_state_transition_to_ready:FW state%x", fw_state)); switch (fw_state) { case MFI_STATE_FAULT: - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN, (CE_NOTE, "mr_sas: FW in FAULT state!!")); return (ENODEV); @@ -2800,10 +3814,14 @@ * to be set */ /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */ - WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | - MFI_INIT_HOTPLUG, instance); - - max_wait = 2; + if (!instance->tbolt) { + WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | + MFI_INIT_HOTPLUG, instance); + } else { + WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE | + MFI_INIT_HOTPLUG, instance); + } + max_wait = (instance->tbolt == 1) ? 180 : 2; cur_state = MFI_STATE_WAIT_HANDSHAKE; break; case MFI_STATE_BOOT_MESSAGE_PENDING: @@ -2815,9 +3833,13 @@ * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) * to be set */ - WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); - - max_wait = 10; + if (!instance->tbolt) { + WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); + } else { + WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG, + instance); + } + max_wait = (instance->tbolt == 1) ? 
180 : 10; cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; break; case MFI_STATE_OPERATIONAL: @@ -2831,26 +3853,46 @@ * to be set */ /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */ - WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); - - max_wait = 10; + if (!instance->tbolt) { + WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); + } else { + WR_RESERVED0_REGISTER(MFI_RESET_FLAGS, + instance); + + for (i = 0; i < (10 * 1000); i++) { + status = + RD_RESERVED0_REGISTER(instance); + if (status & 1) { + delay(1 * + drv_usectohz(MILLISEC)); + } else { + break; + } + } + + } + max_wait = (instance->tbolt == 1) ? 180 : 10; cur_state = MFI_STATE_OPERATIONAL; break; case MFI_STATE_UNDEFINED: /* this state should not last for more than 2 seconds */ con_log(CL_ANN1, (CE_NOTE, "FW state undefined")); - max_wait = 2; + max_wait = (instance->tbolt == 1) ? 180 : 2; cur_state = MFI_STATE_UNDEFINED; break; case MFI_STATE_BB_INIT: - max_wait = 2; + max_wait = (instance->tbolt == 1) ? 180 : 2; cur_state = MFI_STATE_BB_INIT; break; case MFI_STATE_FW_INIT: - max_wait = 2; + max_wait = (instance->tbolt == 1) ? 180 : 2; cur_state = MFI_STATE_FW_INIT; break; + case MFI_STATE_FW_INIT_2: + max_wait = 180; + cur_state = MFI_STATE_FW_INIT_2; + break; case MFI_STATE_DEVICE_SCAN: max_wait = 180; cur_state = MFI_STATE_DEVICE_SCAN; @@ -2858,6 +3900,10 @@ con_log(CL_NONE, (CE_NOTE, "Device scan in progress ...\n")); break; + case MFI_STATE_FLUSH_CACHE: + max_wait = 180; + cur_state = MFI_STATE_FLUSH_CACHE; + break; default: con_log(CL_ANN1, (CE_NOTE, "mr_sas: Unknown state 0x%x", fw_state)); @@ -2885,30 +3931,32 @@ /* return error if fw_state hasn't changed after max_wait */ if (fw_state == cur_state) { - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_WARN, "FW state hasn't changed in %d secs", max_wait)); return (ENODEV); } }; - fw_ctrl = RD_IB_DOORBELL(instance); - - con_log(CL_ANN1, (CE_NOTE, - "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); - - /* - * Write 0xF to the doorbell register to do the following. - * - Abort all outstanding commands (bit 0). - * - Transition from OPERATIONAL to READY state (bit 1). - * - Discard (possible) low MFA posted in 64-bit mode (bit-2). - * - Set to release FW to continue running (i.e. BIOS handshake - * (bit 3). - */ - WR_IB_DOORBELL(0xF, instance); + if (!instance->tbolt) { + fw_ctrl = RD_IB_DOORBELL(instance); + con_log(CL_ANN1, (CE_CONT, + "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); + + /* + * Write 0xF to the doorbell register to do the following. + * - Abort all outstanding commands (bit 0). + * - Transition from OPERATIONAL to READY state (bit 1). + * - Discard (possible) low MFA posted in 64-bit mode (bit-2). + * - Set to release FW to continue running (i.e. BIOS handshake + * (bit 3). 
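For reference, the 0xF literal is just those four bits OR'd together. A tiny sketch; the bit names below are descriptive stand-ins, not taken from the MFI headers:

#include <assert.h>

#define DB_ABORT_OUTSTANDING    (1U << 0)       /* abort all outstanding cmds */
#define DB_READY_TRANSITION     (1U << 1)       /* OPERATIONAL to READY */
#define DB_DISCARD_LOW_MFA      (1U << 2)       /* discard low MFA in 64-bit mode */
#define DB_BIOS_HANDSHAKE       (1U << 3)       /* release FW to continue running */

int
main(void)
{
        unsigned int db = DB_ABORT_OUTSTANDING | DB_READY_TRANSITION |
            DB_DISCARD_LOW_MFA | DB_BIOS_HANDSHAKE;

        assert(db == 0xF);      /* matches the value written to the doorbell */
        return (0);
}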
+ */ + WR_IB_DOORBELL(0xF, instance); + } if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { - return (ENODEV); - } + return (EIO); + } + return (DDI_SUCCESS); } @@ -2925,7 +3973,11 @@ struct mrsas_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct mrsas_evt_log_info *eli_tmp; - cmd = get_mfi_pkt(instance); + if (instance->tbolt) { + cmd = get_raid_msg_mfi_pkt(instance); + } else { + cmd = get_mfi_pkt(instance); + } if (!cmd) { cmn_err(CE_WARN, "mr_sas: failed to get a cmd"); @@ -2933,13 +3985,13 @@ instance->fw_outstanding, uint16_t, instance->max_fw_cmds); return (ENOMEM); } - cmd->retry_count_for_ocr = 0; + /* Clear the frame buffer and assign back the context id */ (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, cmd->index); - dcmd = &cmd->frame->dcmd; + dcmd = &cmd->frame->dcmd; /* allocate the data transfer buffer */ dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info); @@ -2951,8 +4003,8 @@ if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { - con_log(CL_ANN, (CE_WARN, - "get_seq_num: could not allocate data transfer buffer.")); + cmn_err(CE_WARN, + "get_seq_num: could not allocate data transfer buffer."); return (DDI_FAILURE); } @@ -2979,6 +4031,10 @@ cmd->sync_cmd = MRSAS_TRUE; cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { cmn_err(CE_WARN, "get_seq_num: " "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO"); @@ -2993,11 +4049,11 @@ if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) ret = DDI_FAILURE; - if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { - ret = DDI_FAILURE; - } - - return_mfi_pkt(instance, cmd); + if (instance->tbolt) { + return_raid_msg_mfi_pkt(instance, cmd); + } else { + return_mfi_pkt(instance, cmd); + } return (ret); } @@ -3034,6 +4090,7 @@ return (-1); } + return (ret); } @@ -3045,9 +4102,11 @@ { struct mrsas_cmd *cmd = NULL; struct mrsas_dcmd_frame *dcmd; - uint32_t max_cmd = instance->max_fw_cmds; - - cmd = instance->cmd_list[max_cmd]; + if (instance->tbolt) { + cmd = get_raid_msg_mfi_pkt(instance); + } else { + cmd = get_mfi_pkt(instance); + } if (!cmd) { con_log(CL_ANN1, (CE_WARN, @@ -3056,7 +4115,7 @@ instance->fw_outstanding, uint16_t, instance->max_fw_cmds); return; } - cmd->retry_count_for_ocr = 0; + /* Clear the frame buffer and assign back the context id */ (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, @@ -3080,11 +4139,21 @@ cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { con_log(CL_ANN1, (CE_WARN, "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH")); } - con_log(CL_ANN1, (CE_NOTE, "flush_cache done")); + con_log(CL_ANN1, (CE_CONT, "flush_cache done")); + if (instance->tbolt) { + return_raid_msg_mfi_pkt(instance, cmd); + } else { + return_mfi_pkt(instance, cmd); + } + } /* @@ -3093,7 +4162,7 @@ * @cmd: Command to be completed * */ -static void +void service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd) { uint32_t seq_num; @@ -3101,12 +4170,16 @@ (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer; int rval = 0; int tgt = 0; + uint8_t dtype; +#ifdef PDSUPPORT + mrsas_pd_address_t *pd_addr; +#endif ddi_acc_handle_t acc_handle; + con_log(CL_ANN, 
(CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + acc_handle = cmd->frame_dma_obj.acc_handle; - cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status); - if (cmd->cmd_status == ENODATA) { cmd->cmd_status = 0; } @@ -3125,7 +4198,7 @@ * Check for any ld devices that has changed state. i.e. online * or offline. */ - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "AEN: code = %x class = %x locale = %x args = %x", ddi_get32(acc_handle, &evt_detail->code), evt_detail->cl.members.class, @@ -3136,6 +4209,10 @@ case MR_EVT_CFG_CLEARED: { for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { if (instance->mr_ld_list[tgt].dip != NULL) { + mutex_enter(&instance->config_dev_mtx); + instance->mr_ld_list[tgt].flag = + (uint8_t)~MRDRV_TGT_VALID; + mutex_exit(&instance->config_dev_mtx); rval = mrsas_service_evt(instance, tgt, 0, MRSAS_EVT_UNCONFIG_TGT, NULL); con_log(CL_ANN1, (CE_WARN, @@ -3147,6 +4224,10 @@ } case MR_EVT_LD_DELETED: { + tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id); + mutex_enter(&instance->config_dev_mtx); + instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID; + mutex_exit(&instance->config_dev_mtx); rval = mrsas_service_evt(instance, ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0, MRSAS_EVT_UNCONFIG_TGT, NULL); @@ -3167,6 +4248,86 @@ ddi_get8(acc_handle, &evt_detail->args.ld.ld_index))); break; } /* End of MR_EVT_LD_CREATED */ + +#ifdef PDSUPPORT + case MR_EVT_PD_REMOVED_EXT: { + if (instance->tbolt) { + pd_addr = &evt_detail->args.pd_addr; + dtype = pd_addr->scsi_dev_type; + con_log(CL_DLEVEL1, (CE_NOTE, + " MR_EVT_PD_REMOVED_EXT: dtype = %x," + " arg_type = %d ", dtype, evt_detail->arg_type)); + tgt = ddi_get16(acc_handle, + &evt_detail->args.pd.device_id); + mutex_enter(&instance->config_dev_mtx); + instance->mr_tbolt_pd_list[tgt].flag = + (uint8_t)~MRDRV_TGT_VALID; + mutex_exit(&instance->config_dev_mtx); + rval = mrsas_service_evt(instance, ddi_get16( + acc_handle, &evt_detail->args.pd.device_id), + 1, MRSAS_EVT_UNCONFIG_TGT, NULL); + con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:" + "rval = %d tgt id = %d ", rval, + ddi_get16(acc_handle, + &evt_detail->args.pd.device_id))); + } + break; + } /* End of MR_EVT_PD_REMOVED_EXT */ + + case MR_EVT_PD_INSERTED_EXT: { + if (instance->tbolt) { + rval = mrsas_service_evt(instance, + ddi_get16(acc_handle, + &evt_detail->args.pd.device_id), + 1, MRSAS_EVT_CONFIG_TGT, NULL); + con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:" + "rval = %d tgt id = %d ", rval, + ddi_get16(acc_handle, + &evt_detail->args.pd.device_id))); + } + break; + } /* End of MR_EVT_PD_INSERTED_EXT */ + + case MR_EVT_PD_STATE_CHANGE: { + if (instance->tbolt) { + tgt = ddi_get16(acc_handle, + &evt_detail->args.pd.device_id); + if ((evt_detail->args.pd_state.prevState == + PD_SYSTEM) && + (evt_detail->args.pd_state.newState != PD_SYSTEM)) { + mutex_enter(&instance->config_dev_mtx); + instance->mr_tbolt_pd_list[tgt].flag = + (uint8_t)~MRDRV_TGT_VALID; + mutex_exit(&instance->config_dev_mtx); + rval = mrsas_service_evt(instance, + ddi_get16(acc_handle, + &evt_detail->args.pd.device_id), + 1, MRSAS_EVT_UNCONFIG_TGT, NULL); + con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:" + "rval = %d tgt id = %d ", rval, + ddi_get16(acc_handle, + &evt_detail->args.pd.device_id))); + break; + } + if ((evt_detail->args.pd_state.prevState + == UNCONFIGURED_GOOD) && + (evt_detail->args.pd_state.newState == PD_SYSTEM)) { + rval = mrsas_service_evt(instance, + ddi_get16(acc_handle, + &evt_detail->args.pd.device_id), + 1, MRSAS_EVT_CONFIG_TGT, NULL); + 
con_log(CL_ANN1, (CE_WARN, + "mr_sas: PD_INSERTED: rval = %d " + " tgt id = %d ", rval, + ddi_get16(acc_handle, + &evt_detail->args.pd.device_id))); + break; + } + } + break; + } +#endif + } /* End of Main Switch */ /* get copy of seq_num and class/locale for re-registration */ @@ -3182,6 +4343,9 @@ cmd->frame_count = 1; + cmd->retry_count_for_ocr = 0; + cmd->drv_pkt_time = 0; + /* Issue the aen registration frame */ instance->func_ptr->issue_cmd(cmd, instance); } @@ -3204,14 +4368,16 @@ cmd->sync_cmd = MRSAS_FALSE; + con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n", + (void *)cmd)); + + mutex_enter(&instance->int_cmd_mtx); if (cmd->cmd_status == ENODATA) { cmd->cmd_status = 0; } - - con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n", - (void *)cmd)); - cv_broadcast(&instance->int_cmd_cv); + mutex_exit(&instance->int_cmd_mtx); + } /* @@ -3229,20 +4395,22 @@ cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance); fw_state = cur_abs_reg_val & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { - if (instance->disable_online_ctrl_reset == 1) { - con_log(CL_ANN1, (CE_NOTE, - "mrsas_initiate_ocr_if_fw_is_faulty: " - "FW in Fault state, detected in ISR: " - "FW doesn't support ocr ")); - return (ADAPTER_RESET_NOT_REQUIRED); + cmn_err(CE_WARN, + "mrsas_initiate_ocr_if_fw_is_faulty: " + "FW in Fault state, detected in ISR: " + "FW doesn't support ocr "); + + return (ADAPTER_RESET_NOT_REQUIRED); } else { - con_log(CL_ANN1, (CE_NOTE, - "mrsas_initiate_ocr_if_fw_is_faulty: " - "FW in Fault state, detected in ISR: FW supports ocr ")); + con_log(CL_ANN, (CE_NOTE, + "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault " + "state, detected in ISR: FW supports ocr ")); + return (ADAPTER_RESET_REQUIRED); } } + return (ADAPTER_RESET_NOT_REQUIRED); } @@ -3264,7 +4432,7 @@ struct mrsas_header *hdr; struct scsi_arq_status *arqstat; - con_log(CL_ANN1, (CE_CONT, "mrsas_softintr called")); + con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called.")); ASSERT(instance); @@ -3341,7 +4509,7 @@ | STATE_GOT_TARGET | STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS; - con_log(CL_ANN1, (CE_CONT, + con_log(CL_ANN, (CE_CONT, "CDB[0] = %x completed for %s: size %lx context %x", pkt->pkt_cdbp[0], ((acmd->islogical) ? 
"LD" : "PD"), acmd->cmd_dmacount, hdr->context)); @@ -3394,17 +4562,15 @@ break; case MFI_STAT_SCSI_DONE_WITH_ERROR: - con_log(CL_ANN1, (CE_CONT, "scsi_done error")); + con_log(CL_ANN, (CE_CONT, "scsi_done error")); pkt->pkt_reason = CMD_CMPLT; ((struct scsi_status *) pkt->pkt_scbp)->sts_chk = 1; if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) { - con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail")); - } else { pkt->pkt_state |= STATE_ARQ_DONE; arqstat = (void *)(pkt->pkt_scbp); @@ -3421,14 +4587,13 @@ (uint8_t *) &(arqstat->sts_sensedata), cmd->sense, - acmd->cmd_scblen - - offsetof(struct scsi_arq_status, - sts_sensedata), DDI_DEV_AUTOINCR); - } + sizeof (struct scsi_extended_sense), + DDI_DEV_AUTOINCR); + } break; case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND: - con_log(CL_ANN1, (CE_CONT, + con_log(CL_ANN, (CE_CONT, "mrsas_softintr:device not found error")); pkt->pkt_reason = CMD_DEV_GONE; pkt->pkt_statistics = STAT_DISCON; @@ -3488,19 +4653,22 @@ if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) { - con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr: " + con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: " "posting to scsa cmd %p index %x pkt %p " "time %llx", (void *)cmd, cmd->index, (void *)pkt, gethrtime())); (*pkt->pkt_comp)(pkt); } + return_mfi_pkt(instance, cmd); break; + case MFI_CMD_OP_SMP: case MFI_CMD_OP_STP: complete_cmd_in_sync_mode(instance, cmd); break; + case MFI_CMD_OP_DCMD: /* see if got an event notification */ if (ddi_get32(cmd->frame_dma_obj.acc_handle, @@ -3521,14 +4689,16 @@ } break; + case MFI_CMD_OP_ABORT: - con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete")); + con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete")); /* * MFI_CMD_OP_ABORT successfully completed * in the synchronous mode */ complete_cmd_in_sync_mode(instance, cmd); break; + default: mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); @@ -3563,7 +4733,7 @@ * * Allocate the memory and other resources for an dma object. */ -static int +int mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj, uchar_t endian_flags) { @@ -3642,10 +4812,18 @@ * De-allocate the memory and other resources for an dma object, which must * have been alloated by a previous call to mrsas_alloc_dma_obj() */ -static int +int mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj) { + if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) { + return (DDI_SUCCESS); + } + + /* + * NOTE: These check-handle functions fail if *_handle == NULL, but + * this function succeeds because of the previous check. 
+ */ if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) { ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); return (DDI_FAILURE); @@ -3659,7 +4837,7 @@ (void) ddi_dma_unbind_handle(obj.dma_handle); ddi_dma_mem_free(&obj.acc_handle); ddi_dma_free_handle(&obj.dma_handle); - + obj.acc_handle = NULL; return (DDI_SUCCESS); } @@ -3669,7 +4847,7 @@ * * Allocate dma resources for a new scsi command */ -static int +int mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)()) { @@ -3705,6 +4883,13 @@ tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge; tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; + if (instance->tbolt) { + /* OCR-RESET FIX */ + tmp_dma_attr.dma_attr_count_max = + (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */ + tmp_dma_attr.dma_attr_maxxfer = + (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */ + } if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr, cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) { @@ -3816,7 +5001,7 @@ * move dma resources to next dma window * */ -static int +int mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt, struct buf *bp) { @@ -3886,14 +5071,15 @@ { uint16_t flags = 0; uint32_t i; - uint32_t context; + uint32_t context; uint32_t sge_bytes; + uint32_t tmp_data_xfer_len; ddi_acc_handle_t acc_handle; struct mrsas_cmd *cmd; struct mrsas_sge64 *mfi_sgl; struct mrsas_sge_ieee *mfi_sgl_ieee; struct scsa_cmd *acmd = PKT2CMD(pkt); - struct mrsas_pthru_frame *pthru; + struct mrsas_pthru_frame *pthru; struct mrsas_io_frame *ldio; /* find out if this is logical or physical drive command. */ @@ -3908,8 +5094,6 @@ return (NULL); } - cmd->retry_count_for_ocr = 0; - acc_handle = cmd->frame_dma_obj.acc_handle; /* Clear the frame buffer and assign back the context id */ @@ -3951,7 +5135,7 @@ /* * case SCMD_SYNCHRONIZE_CACHE: - * flush_cache(instance); + * flush_cache(instance); * return_mfi_pkt(instance, cmd); * *cmd_done = 1; * @@ -3962,6 +5146,10 @@ case SCMD_WRITE: case SCMD_READ_G1: case SCMD_WRITE_G1: + case SCMD_READ_G4: + case SCMD_WRITE_G4: + case SCMD_READ_G5: + case SCMD_WRITE_G5: if (acmd->islogical) { ldio = (struct mrsas_io_frame *)cmd->frame; @@ -4001,6 +5189,7 @@ context = ddi_get32(acc_handle, &ldio->context); if (acmd->cmd_cdblen == CDB_GROUP0) { + /* 6-byte cdb */ ddi_put32(acc_handle, &ldio->lba_count, ( (uint16_t)(pkt->pkt_cdbp[4]))); @@ -4010,6 +5199,7 @@ ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) << 16))); } else if (acmd->cmd_cdblen == CDB_GROUP1) { + /* 10-byte cdb */ ddi_put32(acc_handle, &ldio->lba_count, ( ((uint16_t)(pkt->pkt_cdbp[8])) | ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); @@ -4019,24 +5209,26 @@ ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); - } else if (acmd->cmd_cdblen == CDB_GROUP2) { + } else if (acmd->cmd_cdblen == CDB_GROUP5) { + /* 12-byte cdb */ ddi_put32(acc_handle, &ldio->lba_count, ( - ((uint16_t)(pkt->pkt_cdbp[9])) | - ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | - ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | - ((uint16_t)(pkt->pkt_cdbp[6]) << 24))); + ((uint32_t)(pkt->pkt_cdbp[9])) | + ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); ddi_put32(acc_handle, &ldio->start_lba_lo, ( ((uint32_t)(pkt->pkt_cdbp[5])) | ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); - } else if (acmd->cmd_cdblen == CDB_GROUP3) { + } else if 
(acmd->cmd_cdblen == CDB_GROUP4) { + /* 16-byte cdb */ ddi_put32(acc_handle, &ldio->lba_count, ( - ((uint16_t)(pkt->pkt_cdbp[13])) | - ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | - ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | - ((uint16_t)(pkt->pkt_cdbp[10]) << 24))); + ((uint32_t)(pkt->pkt_cdbp[13])) | + ((uint32_t)(pkt->pkt_cdbp[12]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[11]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[10]) << 24))); ddi_put32(acc_handle, &ldio->start_lba_lo, ( ((uint32_t)(pkt->pkt_cdbp[9])) | @@ -4044,7 +5236,7 @@ ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); - ddi_put32(acc_handle, &ldio->start_lba_lo, ( + ddi_put32(acc_handle, &ldio->start_lba_hi, ( ((uint32_t)(pkt->pkt_cdbp[5])) | ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | @@ -4090,8 +5282,12 @@ ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); ddi_put16(acc_handle, &pthru->timeout, 0); ddi_put16(acc_handle, &pthru->flags, flags); + tmp_data_xfer_len = 0; + for (i = 0; i < acmd->cmd_cookiecnt; i++) { + tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size; + } ddi_put32(acc_handle, &pthru->data_xfer_len, - acmd->cmd_dmacount); + tmp_data_xfer_len); ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); if (instance->flag_ieee) { mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl; @@ -4142,7 +5338,16 @@ return (cmd); } + #ifndef __sparc +/* + * wait_for_outstanding - Wait for all outstanding cmds + * @instance: Adapter soft state + * + * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to + * complete all its outstanding commands. Returns error if one or more IOs + * are pending after this time period. + */ static int wait_for_outstanding(struct mrsas_instance *instance) { @@ -4153,6 +5358,7 @@ if (!instance->fw_outstanding) { break; } + drv_usecwait(MILLISEC); /* wait for 1000 usecs */; } @@ -4162,7 +5368,8 @@ return (0); } -#endif /* __sparc */ +#endif /* __sparc */ + /* * issue_mfi_pthru */ @@ -4173,6 +5380,7 @@ void *ubuf; uint32_t kphys_addr = 0; uint32_t xferlen = 0; + uint32_t new_xfer_length = 0; uint_t model; ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; dma_obj_t pthru_dma_obj; @@ -4183,24 +5391,24 @@ kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; if (instance->adapterresetinprogress) { - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: Reset flag set, " + con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, " "returning mfi_pkt and setting TRAN_BUSY\n")); return (DDI_FAILURE); } model = ddi_model_convert_from(mode & FMODELS); if (model == DDI_MODEL_ILP32) { - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32")); xferlen = kpthru->sgl.sge32[0].length; ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; } else { #ifdef _ILP32 - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32")); xferlen = kpthru->sgl.sge32[0].length; ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; #else - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64")); xferlen = kpthru->sgl.sge64[0].length; ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; #endif @@ -4209,7 +5417,10 @@ if (xferlen) { /* means IOCTL requires DMA */ /* allocate the data transfer buffer */ - pthru_dma_obj.size = xferlen; + /* pthru_dma_obj.size = xferlen; */ + MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length, + 
PAGESIZE); + pthru_dma_obj.size = new_xfer_length; pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; @@ -4243,7 +5454,7 @@ } ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); - ddi_put8(acc_handle, &pthru->sense_len, 0); + ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); ddi_put8(acc_handle, &pthru->cmd_status, 0); ddi_put8(acc_handle, &pthru->scsi_status, 0); ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); @@ -4254,8 +5465,8 @@ ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); - /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ - ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); + pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */ ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, pthru->cdb_len, DDI_DEV_AUTOINCR); @@ -4267,6 +5478,10 @@ cmd->sync_cmd = MRSAS_TRUE; cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: fw_ioctl failed")); @@ -4288,11 +5503,35 @@ kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); - con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " + con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, " "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t, kpthru->cmd_status, uint8_t, kpthru->scsi_status); + if (kpthru->sense_len) { + uint_t sense_len = SENSE_LENGTH; + void *sense_ubuf = + (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo; + if (kpthru->sense_len <= SENSE_LENGTH) { + sense_len = kpthru->sense_len; + } + + for (i = 0; i < sense_len; i++) { + if (ddi_copyout( + (uint8_t *)cmd->sense+i, + (uint8_t *)sense_ubuf+i, 1, mode)) { + con_log(CL_ANN, (CE_WARN, + "issue_mfi_pthru : " + "copy to user space failed")); + } + con_log(CL_DLEVEL1, (CE_WARN, + "Copying Sense info sense_buff[%d] = 0x%X", + i, *((uint8_t *)cmd->sense + i))); + } + } + (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0, + DDI_DMA_SYNC_FORDEV); + if (xferlen) { /* free kernel buffer */ if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) @@ -4312,6 +5551,7 @@ void *ubuf; uint32_t kphys_addr = 0; uint32_t xferlen = 0; + uint32_t new_xfer_length = 0; uint32_t model; dma_obj_t dcmd_dma_obj; struct mrsas_dcmd_frame *kdcmd; @@ -4320,25 +5560,26 @@ int i; dcmd = &cmd->frame->dcmd; kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; + if (instance->adapterresetinprogress) { con_log(CL_ANN1, (CE_NOTE, "Reset flag set, " - "returning mfi_pkt and setting TRAN_BUSY\n")); + "returning mfi_pkt and setting TRAN_BUSY")); return (DDI_FAILURE); } model = ddi_model_convert_from(mode & FMODELS); if (model == DDI_MODEL_ILP32) { - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32")); xferlen = kdcmd->sgl.sge32[0].length; ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; } else { #ifdef _ILP32 - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32")); xferlen = kdcmd->sgl.sge32[0].length; ubuf = (void 
*)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; #else - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64")); xferlen = kdcmd->sgl.sge64[0].length; ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; #endif @@ -4346,7 +5587,10 @@ if (xferlen) { /* means IOCTL requires DMA */ /* allocate the data transfer buffer */ - dcmd_dma_obj.size = xferlen; + /* dcmd_dma_obj.size = xferlen; */ + MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length, + PAGESIZE); + dcmd_dma_obj.size = new_xfer_length; dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; @@ -4354,12 +5598,13 @@ dcmd_dma_obj.dma_attr.dma_attr_align = 1; /* allocate kernel buffer for DMA */ - if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, - (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { - con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " - "could not allocate data transfer buffer.")); - return (DDI_FAILURE); - } + if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + con_log(CL_ANN, + (CE_WARN, "issue_mfi_dcmd: could not " + "allocate data transfer buffer.")); + return (DDI_FAILURE); + } (void) memset(dcmd_dma_obj.buffer, 0, xferlen); /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ @@ -4396,6 +5641,10 @@ cmd->sync_cmd = MRSAS_TRUE; cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed")); } else { @@ -4415,6 +5664,8 @@ } kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status); + con_log(CL_ANN, + (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status)); DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t, kdcmd->cmd, uint8_t, kdcmd->cmd_status); @@ -4438,6 +5689,8 @@ void *response_ubuf; uint32_t request_xferlen = 0; uint32_t response_xferlen = 0; + uint32_t new_xfer_length1 = 0; + uint32_t new_xfer_length2 = 0; uint_t model; dma_obj_t request_dma_obj; dma_obj_t response_dma_obj; @@ -4455,44 +5708,44 @@ ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0]; if (instance->adapterresetinprogress) { - con_log(CL_ANN1, (CE_NOTE, "Reset flag set, " + con_log(CL_ANN1, (CE_WARN, "Reset flag set, " "returning mfi_pkt and setting TRAN_BUSY\n")); return (DDI_FAILURE); } model = ddi_model_convert_from(mode & FMODELS); if (model == DDI_MODEL_ILP32) { - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32")); sge32 = &ksmp->sgl[0].sge32[0]; response_xferlen = sge32[0].length; request_xferlen = sge32[1].length; - con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " + con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: " "response_xferlen = %x, request_xferlen = %x", response_xferlen, request_xferlen)); response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: " "response_ubuf = %p, request_ubuf = %p", response_ubuf, request_ubuf)); } else { #ifdef _ILP32 - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32")); sge32 = &ksmp->sgl[0].sge32[0]; response_xferlen = sge32[0].length; request_xferlen = sge32[1].length; - con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " + con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: 
" "response_xferlen = %x, request_xferlen = %x", response_xferlen, request_xferlen)); response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: " "response_ubuf = %p, request_ubuf = %p", response_ubuf, request_ubuf)); #else - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64")); sge64 = &ksmp->sgl[0].sge64[0]; response_xferlen = sge64[0].length; @@ -4505,7 +5758,10 @@ if (request_xferlen) { /* means IOCTL requires DMA */ /* allocate the data transfer buffer */ - request_dma_obj.size = request_xferlen; + /* request_dma_obj.size = request_xferlen; */ + MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen, + new_xfer_length1, PAGESIZE); + request_dma_obj.size = new_xfer_length1; request_dma_obj.dma_attr = mrsas_generic_dma_attr; request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; @@ -4536,7 +5792,10 @@ if (response_xferlen) { /* means IOCTL requires DMA */ /* allocate the data transfer buffer */ - response_dma_obj.size = response_xferlen; + /* response_dma_obj.size = response_xferlen; */ + MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen, + new_xfer_length2, PAGESIZE); + response_dma_obj.size = new_xfer_length2; response_dma_obj.dma_attr = mrsas_generic_dma_attr; response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; @@ -4580,7 +5839,7 @@ model = ddi_model_convert_from(mode & FMODELS); if (model == DDI_MODEL_ILP32) { - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32")); sge32 = &smp->sgl[0].sge32[0]; @@ -4592,7 +5851,7 @@ request_dma_obj.dma_cookie[0].dmac_address); } else { #ifdef _ILP32 - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32")); sge32 = &smp->sgl[0].sge32[0]; ddi_put32(acc_handle, &sge32[0].length, response_xferlen); @@ -4602,7 +5861,7 @@ ddi_put32(acc_handle, &sge32[1].phys_addr, request_dma_obj.dma_cookie[0].dmac_address); #else - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64")); sge64 = &smp->sgl[0].sge64[0]; ddi_put32(acc_handle, &sge64[0].length, response_xferlen); @@ -4613,7 +5872,7 @@ request_dma_obj.dma_cookie[0].dmac_address); #endif } - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : " + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : " "smp->response_xferlen = %d, smp->request_xferlen = %d " "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length), ddi_get32(acc_handle, &sge32[1].length), @@ -4622,11 +5881,15 @@ cmd->sync_cmd = MRSAS_TRUE; cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: fw_ioctl failed")); } else { - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: copy to user space")); if (request_xferlen) { @@ -4660,7 +5923,7 @@ ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status); con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", - ddi_get8(acc_handle, &smp->cmd_status))); + ksmp->cmd_status)); DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status); if (request_xferlen) { @@ -4690,6 +5953,8 @@ void *fis_ubuf; void *data_ubuf; uint32_t fis_xferlen = 0; + uint32_t new_xfer_length1 = 0; + uint32_t 
new_xfer_length2 = 0; uint32_t data_xferlen = 0; uint_t model; dma_obj_t fis_dma_obj; @@ -4703,24 +5968,22 @@ kstp = (struct mrsas_stp_frame *)&ioctl->frame[0]; if (instance->adapterresetinprogress) { - con_log(CL_ANN1, (CE_NOTE, "Reset flag set, " + con_log(CL_ANN1, (CE_WARN, "Reset flag set, " "returning mfi_pkt and setting TRAN_BUSY\n")); return (DDI_FAILURE); } model = ddi_model_convert_from(mode & FMODELS); if (model == DDI_MODEL_ILP32) { - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32")); fis_xferlen = kstp->sgl.sge32[0].length; data_xferlen = kstp->sgl.sge32[1].length; fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; - } - else - { + } else { #ifdef _ILP32 - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32")); fis_xferlen = kstp->sgl.sge32[0].length; data_xferlen = kstp->sgl.sge32[1].length; @@ -4728,7 +5991,7 @@ fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; #else - con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64")); + con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64")); fis_xferlen = kstp->sgl.sge64[0].length; data_xferlen = kstp->sgl.sge64[1].length; @@ -4740,12 +6003,15 @@ if (fis_xferlen) { - con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: " + con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: " "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen)); /* means IOCTL requires DMA */ /* allocate the data transfer buffer */ - fis_dma_obj.size = fis_xferlen; + /* fis_dma_obj.size = fis_xferlen; */ + MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen, + new_xfer_length1, PAGESIZE); + fis_dma_obj.size = new_xfer_length1; fis_dma_obj.dma_attr = mrsas_generic_dma_attr; fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; @@ -4773,19 +6039,22 @@ } if (data_xferlen) { - con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p " + con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p " "data_xferlen = %x", data_ubuf, data_xferlen)); /* means IOCTL requires DMA */ /* allocate the data transfer buffer */ - data_dma_obj.size = data_xferlen; + /* data_dma_obj.size = data_xferlen; */ + MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2, + PAGESIZE); + data_dma_obj.size = new_xfer_length2; data_dma_obj.dma_attr = mrsas_generic_dma_attr; data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; data_dma_obj.dma_attr.dma_attr_sgllen = 1; data_dma_obj.dma_attr.dma_attr_align = 1; -/* allocate kernel buffer for DMA */ + /* allocate kernel buffer for DMA */ if (mrsas_alloc_dma_obj(instance, &data_dma_obj, (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " @@ -4829,6 +6098,10 @@ cmd->sync_cmd = MRSAS_TRUE; cmd->frame_count = 1; + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed")); } else { @@ -4860,6 +6133,8 @@ } kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status); + con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d", + kstp->cmd_status)); DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status); if (fis_xferlen) { @@ -4880,7 +6155,7 @@ /* * fill_up_drv_ver */ -static void +void 
fill_up_drv_ver(struct mrsas_drv_ver *dv) { (void) memset(dv, 0, sizeof (struct mrsas_drv_ver)); @@ -4891,6 +6166,7 @@ (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION)); (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE, strlen(MRSAS_RELDATE)); + } /* @@ -4917,7 +6193,7 @@ model = ddi_model_convert_from(mode & FMODELS); if (model == DDI_MODEL_ILP32) { - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: DDI_MODEL_ILP32")); xferlen = kdcmd->sgl.sge32[0].length; @@ -4925,23 +6201,23 @@ ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; } else { #ifdef _ILP32 - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: DDI_MODEL_ILP32")); xferlen = kdcmd->sgl.sge32[0].length; ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; #else - con_log(CL_ANN1, (CE_NOTE, + con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: DDI_MODEL_LP64")); xferlen = kdcmd->sgl.sge64[0].length; ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; #endif } - con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " + con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: " "dataBuf=%p size=%d bytes", ubuf, xferlen)); switch (kdcmd->opcode) { case MRSAS_DRIVER_IOCTL_DRIVER_VERSION: - con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " + con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: " "MRSAS_DRIVER_IOCTL_DRIVER_VERSION")); fill_up_drv_ver(&dv); @@ -5017,8 +6293,11 @@ struct mrsas_header *hdr; struct mrsas_cmd *cmd; - cmd = get_mfi_pkt(instance); - + if (instance->tbolt) { + cmd = get_raid_msg_mfi_pkt(instance); + } else { + cmd = get_mfi_pkt(instance); + } if (!cmd) { con_log(CL_ANN, (CE_WARN, "mr_sas: " "failed to get a cmd packet")); @@ -5026,7 +6305,6 @@ instance->fw_outstanding, uint16_t, instance->max_fw_cmds); return (DDI_FAILURE); } - cmd->retry_count_for_ocr = 0; /* Clear the frame buffer and assign back the context id */ (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); @@ -5059,7 +6337,11 @@ if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) rval = DDI_FAILURE; - return_mfi_pkt(instance, cmd); + if (instance->tbolt) { + return_raid_msg_mfi_pkt(instance, cmd); + } else { + return_mfi_pkt(instance, cmd); + } return (rval); } @@ -5091,6 +6373,7 @@ union mrsas_evt_class_locale curr_aen; union mrsas_evt_class_locale prev_aen; + con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new @@ -5151,14 +6434,18 @@ curr_aen.members.locale = LE_16(curr_aen.members.locale); } - cmd = get_mfi_pkt(instance); + if (instance->tbolt) { + cmd = get_raid_msg_mfi_pkt(instance); + } else { + cmd = get_mfi_pkt(instance); + } if (!cmd) { DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding, uint16_t, instance->max_fw_cmds); return (ENOMEM); } - cmd->retry_count_for_ocr = 0; + /* Clear the frame buffer and assign back the context id */ (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, @@ -5207,12 +6494,15 @@ /* Issue the aen registration frame */ /* atomic_add_16 (&instance->fw_outstanding, 1); */ + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } instance->func_ptr->issue_cmd(cmd, instance); return (0); } -static void +void display_scsi_inquiry(caddr_t scsi_inq) { #define MAX_SCSI_DEVICE_CODE 14 @@ -5220,38 +6510,38 @@ char inquiry_buf[256] = {0}; int len; const char *const scsi_device_types[] = { - "Direct-Access ", + "Direct-Access ", 
"Sequential-Access", - "Printer ", - "Processor ", - "WORM ", - "CD-ROM ", - "Scanner ", - "Optical Device ", - "Medium Changer ", - "Communications ", - "Unknown ", - "Unknown ", - "Unknown ", - "Enclosure ", + "Printer ", + "Processor ", + "WORM ", + "CD-ROM ", + "Scanner ", + "Optical Device ", + "Medium Changer ", + "Communications ", + "Unknown ", + "Unknown ", + "Unknown ", + "Enclosure ", }; len = 0; - len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); + len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); for (i = 8; i < 16; i++) { len += snprintf(inquiry_buf + len, 265 - len, "%c", scsi_inq[i]); } - len += snprintf(inquiry_buf + len, 265 - len, " Model: "); + len += snprintf(inquiry_buf + len, 265 - len, " Model: "); for (i = 16; i < 32; i++) { len += snprintf(inquiry_buf + len, 265 - len, "%c", scsi_inq[i]); } - len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); + len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); for (i = 32; i < 36; i++) { len += snprintf(inquiry_buf + len, 265 - len, "%c", @@ -5264,13 +6554,13 @@ i = scsi_inq[0] & 0x1f; - len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", + len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : - "Unknown "); + "Unknown "); len += snprintf(inquiry_buf + len, 265 - len, - " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); + " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); @@ -5278,7 +6568,7 @@ len += snprintf(inquiry_buf + len, 265 - len, "\n"); } - con_log(CL_ANN1, (CE_CONT, inquiry_buf)); + con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf)); } static void @@ -5294,8 +6584,9 @@ mlist_t process_list; if (instance->adapterresetinprogress == 1) { - con_log(CL_ANN1, (CE_NOTE, "io_timeout_checker" + con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:" " reset in progress")); + instance->timeout_id = timeout(io_timeout_checker, (void *) instance, drv_usectohz(MRSAS_1_SECOND)); return; @@ -5303,10 +6594,18 @@ /* See if this check needs to be in the beginning or last in ISR */ if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) { - con_log(CL_ANN1, (CE_NOTE, - "Fw Fault state Handling in io_timeout_checker")); + cmn_err(CE_WARN, "io_timeout_checker: " + "FW Fault, calling reset adapter"); + cmn_err(CE_CONT, "io_timeout_checker: " + "fw_outstanding 0x%X max_fw_cmds 0x%X", + instance->fw_outstanding, instance->max_fw_cmds); if (instance->adapterresetinprogress == 0) { - (void) mrsas_reset_ppc(instance); + instance->adapterresetinprogress = 1; + if (instance->tbolt) + (void) mrsas_tbolt_reset_ppc(instance); + else + (void) mrsas_reset_ppc(instance); + instance->adapterresetinprogress = 0; } instance->timeout_id = timeout(io_timeout_checker, (void *) instance, drv_usectohz(MRSAS_1_SECOND)); @@ -5337,10 +6636,12 @@ time = --cmd->drv_pkt_time; } if (time <= 0) { - con_log(CL_ANN1, (CE_NOTE, "%llx: " - "io_timeout_checker: TIMING OUT: pkt " - ": %p, cmd %p", gethrtime(), (void *)pkt, - (void *)cmd)); + cmn_err(CE_WARN, "%llx: " + "io_timeout_checker: TIMING OUT: pkt: %p, " + "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n", + gethrtime(), (void *)pkt, (void *)cmd, + instance->fw_outstanding, instance->max_fw_cmds); + counter++; break; } @@ -5348,52 +6649,57 @@ mutex_exit(&instance->cmd_pend_mtx); if (counter) { - con_log(CL_ANN1, (CE_NOTE, - "io_timeout_checker " - "cmd->retrycount_for_ocr %d, " - "cmd index %d , cmd address %p ", - 
cmd->retry_count_for_ocr+1, cmd->index, (void *)cmd)); - if (instance->disable_online_ctrl_reset == 1) { - con_log(CL_ANN1, (CE_NOTE, "mrsas: " - "OCR is not supported by the Firmware " - "Failing all the queued packets \n")); - - (void) mrsas_kill_adapter(instance); + cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT " + "supported by Firmware, KILL adapter!!!", + instance->instance, __func__); + + if (instance->tbolt) + mrsas_tbolt_kill_adapter(instance); + else + (void) mrsas_kill_adapter(instance); + return; } else { - if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) { + if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) { if (instance->adapterresetinprogress == 0) { - con_log(CL_ANN1, (CE_NOTE, "mrsas: " - "OCR is supported by FW " - "triggering mrsas_reset_ppc")); - (void) mrsas_reset_ppc(instance); + if (instance->tbolt) { + (void) mrsas_tbolt_reset_ppc( + instance); + } else { + (void) mrsas_reset_ppc( + instance); + } } } else { - con_log(CL_ANN1, (CE_NOTE, - "io_timeout_checker:" - " cmdindex: %d,cmd address: %p " + cmn_err(CE_WARN, + "io_timeout_checker: " + "cmd %p cmd->index %d " "timed out even after 3 resets: " - "so kill adapter", cmd->index, - (void *)cmd)); - (void) mrsas_kill_adapter(instance); + "so KILL adapter", (void *)cmd, cmd->index); + + mrsas_print_cmd_details(instance, cmd, 0xDD); + + if (instance->tbolt) + mrsas_tbolt_kill_adapter(instance); + else + (void) mrsas_kill_adapter(instance); return; } } } - - - con_log(CL_ANN1, (CE_NOTE, "mrsas: " + con_log(CL_ANN, (CE_NOTE, "mrsas: " "schedule next timeout check: " "do timeout \n")); instance->timeout_id = timeout(io_timeout_checker, (void *)instance, drv_usectohz(MRSAS_1_SECOND)); } -static int + +static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *instance) { - return ((int)RD_OB_SCRATCH_PAD_0(instance)); + return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance)); } static void @@ -5404,7 +6710,7 @@ pkt = cmd->pkt; if (pkt) { - con_log(CL_ANN1, (CE_CONT, "%llx : issue_cmd_ppc:" + con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:" "ISSUED CMD TO FW : called : cmd:" ": %p instance : %p pkt : %p pkt_time : %x\n", gethrtime(), (void *)cmd, (void *)instance, @@ -5417,13 +6723,17 @@ } } else { - con_log(CL_ANN1, (CE_CONT, "%llx : issue_cmd_ppc:" + con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:" "ISSUED CMD TO FW : called : cmd : %p, instance: %p" "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance)); } + + mutex_enter(&instance->reg_write_mtx); /* Issue the command to the FW */ WR_IB_QPORT((cmd->frame_phys_addr) | (((cmd->frame_count - 1) << 1) | 1), instance); + mutex_exit(&instance->reg_write_mtx); + } /* @@ -5444,10 +6754,12 @@ cmd->frame_dma_obj.acc_handle, &hdr->timeout); if (cmd->drv_pkt_time < debug_timeout_g) cmd->drv_pkt_time = (uint16_t)debug_timeout_g; + con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: " "issue and return in reset case\n")); WR_IB_QPORT((cmd->frame_phys_addr) | (((cmd->frame_count - 1) << 1) | 1), instance); + return (DDI_SUCCESS); } else { con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n")); @@ -5456,15 +6768,16 @@ cmd->cmd_status = ENODATA; + mutex_enter(&instance->reg_write_mtx); + /* Issue the command to the FW */ WR_IB_QPORT((cmd->frame_phys_addr) | (((cmd->frame_count - 1) << 1) | 1), instance); + mutex_exit(&instance->reg_write_mtx); mutex_enter(&instance->int_cmd_mtx); - for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); } - mutex_exit(&instance->int_cmd_mtx); con_log(CL_ANN1, (CE_NOTE, 
"issue_cmd_in_sync_mode_ppc: done")); @@ -5494,7 +6807,7 @@ ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, MFI_CMD_STATUS_POLL_MODE); flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); - flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); @@ -5511,7 +6824,7 @@ if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) { - con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode: " + con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: " "cmd polling timed out")); return (DDI_FAILURE); } @@ -5607,18 +6920,18 @@ static int mrsas_kill_adapter(struct mrsas_instance *instance) { - if (instance->deadadapter == 1) - return (DDI_FAILURE); - - con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: " - "Writing to doorbell with MFI_STOP_ADP ")); - mutex_enter(&instance->ocr_flags_mtx); - instance->deadadapter = 1; - mutex_exit(&instance->ocr_flags_mtx); - instance->func_ptr->disable_intr(instance); - WR_IB_DOORBELL(MFI_STOP_ADP, instance); - (void) mrsas_complete_pending_cmds(instance); - return (DDI_SUCCESS); + if (instance->deadadapter == 1) + return (DDI_FAILURE); + + con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: " + "Writing to doorbell with MFI_STOP_ADP ")); + mutex_enter(&instance->ocr_flags_mtx); + instance->deadadapter = 1; + mutex_exit(&instance->ocr_flags_mtx); + instance->func_ptr->disable_intr(instance); + WR_IB_DOORBELL(MFI_STOP_ADP, instance); + (void) mrsas_complete_pending_cmds(instance); + return (DDI_SUCCESS); } @@ -5630,9 +6943,11 @@ uint32_t cur_abs_reg_val; uint32_t fw_state; + con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + if (instance->deadadapter == 1) { - con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " - "no more resets as HBA has been marked dead ")); + cmn_err(CE_WARN, "mrsas_reset_ppc: " + "no more resets as HBA has been marked dead "); return (DDI_FAILURE); } mutex_enter(&instance->ocr_flags_mtx); @@ -5640,6 +6955,7 @@ mutex_exit(&instance->ocr_flags_mtx); con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress " "flag set, time %llx", gethrtime())); + instance->func_ptr->disable_intr(instance); retry_reset: WR_IB_WRITE_SEQ(0, instance); @@ -5657,8 +6973,8 @@ delay(100 * drv_usectohz(MILLISEC)); status = RD_OB_DRWE(instance); if (retry++ == 100) { - con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: DRWE bit " - "check retry count %d\n", retry)); + cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit " + "check retry count %d", retry); return (DDI_FAILURE); } } @@ -5669,11 +6985,14 @@ delay(100 * drv_usectohz(MILLISEC)); status = RD_OB_DRWE(instance); if (retry++ == 100) { + cmn_err(CE_WARN, "mrsas_reset_ppc: " + "RESET FAILED. 
KILL adapter called."); + (void) mrsas_kill_adapter(instance); return (DDI_FAILURE); } } - con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete")); + con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete")); con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " "Calling mfi_state_transition_to_ready")); @@ -5700,15 +7019,18 @@ instance->fw_fault_count_after_ocr++; if (instance->fw_fault_count_after_ocr < MAX_FW_RESET_COUNT) { - con_log(CL_ANN1, (CE_WARN, "mrsas_reset_ppc: " - "FW is in fault after OCR count %d ", - instance->fw_fault_count_after_ocr)); + cmn_err(CE_WARN, "mrsas_reset_ppc: " + "FW is in fault after OCR count %d " + "Retry Reset", + instance->fw_fault_count_after_ocr); goto retry_reset; } else { - con_log(CL_ANN1, (CE_WARN, "mrsas_reset_ppc: " - "Max Reset Count exceeded " - "Mark HBA as bad")); + cmn_err(CE_WARN, "mrsas_reset_ppc: " + "Max Reset Count exceeded >%d" + "Mark HBA as bad, KILL adapter", + MAX_FW_RESET_COUNT); + (void) mrsas_kill_adapter(instance); return (DDI_FAILURE); } @@ -5734,37 +7056,52 @@ (void) mrsas_issue_init_mfi(instance); con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " "mrsas_issue_init_mfi Done")); + con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " "Calling mrsas_print_pending_cmd\n")); (void) mrsas_print_pending_cmds(instance); con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " "mrsas_print_pending_cmd done\n")); + instance->func_ptr->enable_intr(instance); instance->fw_outstanding = 0; + con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " "Calling mrsas_issue_pending_cmds")); (void) mrsas_issue_pending_cmds(instance); con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " - "Complete")); + "issue_pending_cmds done.\n")); + con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " "Calling aen registration")); + + + instance->aen_cmd->retry_count_for_ocr = 0; + instance->aen_cmd->drv_pkt_time = 0; + instance->func_ptr->issue_cmd(instance->aen_cmd, instance); con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n")); + mutex_enter(&instance->ocr_flags_mtx); instance->adapterresetinprogress = 0; mutex_exit(&instance->ocr_flags_mtx); con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " "adpterresetinprogress flag unset")); + con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n")); return (DDI_SUCCESS); } -static int -mrsas_common_check(struct mrsas_instance *instance, - struct mrsas_cmd *cmd) + +/* + * FMA functions. 
+ */ +int +mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd) { int ret = DDI_SUCCESS; - if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != + if (cmd != NULL && + mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != DDI_SUCCESS) { ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); if (cmd->pkt != NULL) { @@ -5776,7 +7113,7 @@ if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) != DDI_SUCCESS) { ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); - if (cmd->pkt != NULL) { + if (cmd != NULL && cmd->pkt != NULL) { cmd->pkt->pkt_reason = CMD_TRAN_ERR; cmd->pkt->pkt_statistics = 0; } @@ -5785,7 +7122,7 @@ if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) != DDI_SUCCESS) { ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); - if (cmd->pkt != NULL) { + if (cmd != NULL && cmd->pkt != NULL) { cmd->pkt->pkt_reason = CMD_TRAN_ERR; cmd->pkt->pkt_statistics = 0; } @@ -5796,7 +7133,7 @@ ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0); - if (cmd->pkt != NULL) { + if (cmd != NULL && cmd->pkt != NULL) { cmd->pkt->pkt_reason = CMD_TRAN_ERR; cmd->pkt->pkt_statistics = 0; } @@ -5940,7 +7277,7 @@ int avail, actual, count; int i, flag, ret; - con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: intr_type = %x", + con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x", intr_type)); /* Get number of interrupts */ @@ -5952,7 +7289,7 @@ return (DDI_FAILURE); } - con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: count = %d ", count)); + con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count)); /* Get number of available interrupts */ ret = ddi_intr_get_navail(dip, intr_type, &avail); @@ -5962,7 +7299,7 @@ return (DDI_FAILURE); } - con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: avail = %d ", avail)); + con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail)); /* Only one interrupt routine. So limit the count to 1 */ if (count > 1) { @@ -5973,12 +7310,14 @@ * Allocate an array of interrupt handlers. Currently we support * only one interrupt. The framework can be extended later. */ - instance->intr_size = count * sizeof (ddi_intr_handle_t); - instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP); + instance->intr_htable_size = count * sizeof (ddi_intr_handle_t); + instance->intr_htable = kmem_zalloc(instance->intr_htable_size, + KM_SLEEP); ASSERT(instance->intr_htable); - flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type == - DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; + flag = ((intr_type == DDI_INTR_TYPE_MSI) || + (intr_type == DDI_INTR_TYPE_MSIX)) ? 
+ DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL; /* Allocate interrupt */ ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0, @@ -5987,9 +7326,9 @@ if ((ret != DDI_SUCCESS) || (actual == 0)) { con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " "avail = %d", avail)); - kmem_free(instance->intr_htable, instance->intr_size); - return (DDI_FAILURE); - } + goto mrsas_free_htable; + } + if (actual < count) { con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " "Requested = %d Received = %d", count, actual)); @@ -6003,12 +7342,7 @@ &instance->intr_pri)) != DDI_SUCCESS) { con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " "get priority call failed")); - - for (i = 0; i < actual; i++) { - (void) ddi_intr_free(instance->intr_htable[i]); - } - kmem_free(instance->intr_htable, instance->intr_size); - return (DDI_FAILURE); + goto mrsas_free_handles; } /* @@ -6017,12 +7351,7 @@ if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) { con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " "High level interrupts not supported.")); - - for (i = 0; i < actual; i++) { - (void) ddi_intr_free(instance->intr_htable[i]); - } - kmem_free(instance->intr_htable, instance->intr_size); - return (DDI_FAILURE); + goto mrsas_free_handles; } con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ", @@ -6037,31 +7366,18 @@ if (ret != DDI_SUCCESS) { con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:" "failed %d", ret)); - - for (i = 0; i < actual; i++) { - (void) ddi_intr_free(instance->intr_htable[i]); - } - kmem_free(instance->intr_htable, instance->intr_size); - return (DDI_FAILURE); - } - - } - - con_log(CL_DLEVEL1, (CE_WARN, " ddi_intr_add_handler done")); + goto mrsas_free_handles; + } + + } + + con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done")); if ((ret = ddi_intr_get_cap(instance->intr_htable[0], &instance->intr_cap)) != DDI_SUCCESS) { con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d", ret)); - - /* Free already allocated intr */ - for (i = 0; i < actual; i++) { - (void) ddi_intr_remove_handler( - instance->intr_htable[i]); - (void) ddi_intr_free(instance->intr_htable[i]); - } - kmem_free(instance->intr_htable, instance->intr_size); - return (DDI_FAILURE); + goto mrsas_free_handlers; } if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { @@ -6081,6 +7397,23 @@ return (DDI_SUCCESS); +mrsas_free_handlers: + for (i = 0; i < actual; i++) + (void) ddi_intr_remove_handler(instance->intr_htable[i]); + +mrsas_free_handles: + for (i = 0; i < actual; i++) + (void) ddi_intr_free(instance->intr_htable[i]); + +mrsas_free_htable: + if (instance->intr_htable != NULL) + kmem_free(instance->intr_htable, instance->intr_htable_size); + + instance->intr_htable = NULL; + instance->intr_htable_size = 0; + + return (DDI_FAILURE); + } @@ -6108,7 +7441,12 @@ (void) ddi_intr_free(instance->intr_htable[i]); } - kmem_free(instance->intr_htable, instance->intr_size); + if (instance->intr_htable != NULL) + kmem_free(instance->intr_htable, instance->intr_htable_size); + + instance->intr_htable = NULL; + instance->intr_htable_size = 0; + } static int @@ -6117,7 +7455,7 @@ { struct mrsas_instance *instance; int config; - int rval; + int rval = NDI_SUCCESS; char *ptr = NULL; int tgt, lun; @@ -6148,6 +7486,11 @@ if (lun == 0) { rval = mrsas_config_ld(instance, tgt, lun, childp); +#ifdef PDSUPPORT + } else if (instance->tbolt == 1 && lun != 0) { + rval = mrsas_tbolt_config_pd(instance, + tgt, lun, childp); +#endif } else { rval = NDI_FAILURE; } @@ -6185,6 +7528,15 @@ } +#ifdef PDSUPPORT + /* Config PD devices connected to the card */ + if 
(instance->tbolt) { + for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) { + (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL); + } + } +#endif + rval = NDI_SUCCESS; return (rval); } @@ -6241,16 +7593,21 @@ dev_info_t *child; int rval; - con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", + con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", tgt, lun)); if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { if (ldip) { *ldip = child; } - con_log(CL_ANN1, (CE_NOTE, - "mrsas_config_ld: Child = %p found t = %d l = %d", - (void *)child, tgt, lun)); + if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) { + rval = mrsas_service_evt(instance, tgt, 0, + MRSAS_EVT_UNCONFIG_TGT, NULL); + con_log(CL_ANN1, (CE_WARN, + "mr_sas: DELETING STALE ENTRY rval = %d " + "tgt id = %d ", rval, tgt)); + return (NDI_FAILURE); + } return (NDI_SUCCESS); } @@ -6271,12 +7628,12 @@ } kmem_free(sd, sizeof (struct scsi_device)); - con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: return rval = %d", + con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d", rval)); return (rval); } -static int +int mrsas_config_scsi_device(struct mrsas_instance *instance, struct scsi_device *sd, dev_info_t **dipp) { @@ -6290,7 +7647,7 @@ int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; int rval; - con_log(CL_ANN1, (CE_WARN, "mr_sas: scsi_device t%dL%d", tgt, lun)); + con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun)); scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, NULL, &nodename, &compatible, &ncompatible); @@ -6302,12 +7659,12 @@ } childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename; - con_log(CL_ANN1, (CE_WARN, + con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: Childname = %2s nodename = %s", childname, nodename)); /* Create a dev node */ rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); - con_log(CL_ANN1, (CE_WARN, + con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); if (rval == NDI_SUCCESS) { if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != @@ -6341,7 +7698,7 @@ ndi_prop_remove_all(ldip); (void) ndi_devi_free(ldip); } else { - con_log(CL_ANN1, (CE_WARN, "mr_sas: online Done :" + con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :" "0 t%dl%d", tgt, lun)); } @@ -6351,7 +7708,7 @@ *dipp = ldip; } - con_log(CL_DLEVEL1, (CE_WARN, + con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: config_scsi_device rval = %d t%dL%d", rval, tgt, lun)); scsi_hba_nodename_compatible_free(nodename, compatible); @@ -6359,7 +7716,7 @@ } /*ARGSUSED*/ -static int +int mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event, uint64_t wwn) { @@ -6378,6 +7735,7 @@ mrevt->tgt = tgt; mrevt->lun = lun; mrevt->event = event; + mrevt->wwn = wwn; if ((ddi_taskq_dispatch(instance->taskq, (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != @@ -6405,10 +7763,17 @@ mrevt->tgt, mrevt->lun, mrevt->event)); if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { + mutex_enter(&instance->config_dev_mtx); dip = instance->mr_ld_list[mrevt->tgt].dip; + mutex_exit(&instance->config_dev_mtx); +#ifdef PDSUPPORT } else { - return; - } + mutex_enter(&instance->config_dev_mtx); + dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip; + mutex_exit(&instance->config_dev_mtx); +#endif + } + ndi_devi_enter(instance->dip, &circ1); switch (mrevt->event) { @@ -6418,6 +7783,12 @@ if (mrevt->lun == 0) { (void) mrsas_config_ld(instance, mrevt->tgt, 0, NULL); +#ifdef PDSUPPORT + } else if (instance->tbolt) { + (void) 
mrsas_tbolt_config_pd(instance, + mrevt->tgt, + 1, NULL); +#endif } con_log(CL_ANN1, (CE_NOTE, "mr_sas: EVT_CONFIG_TGT called:" @@ -6461,11 +7832,12 @@ ndi_devi_exit(instance->dip, circ1); } -static int + +int mrsas_mode_sense_build(struct scsi_pkt *pkt) { union scsi_cdb *cdbp; - uint16_t page_code; + uint16_t page_code; struct scsa_cmd *acmd; struct buf *bp; struct mode_header *modehdrp; diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/mr_sas.conf --- a/usr/src/uts/common/io/mr_sas/mr_sas.conf Wed Nov 07 14:05:17 2012 -0800 +++ b/usr/src/uts/common/io/mr_sas/mr_sas.conf Wed Nov 07 15:57:25 2012 -0500 @@ -1,13 +1,15 @@ # -# Copyright (c) 2008-2009, LSI Logic Corporation. +# Copyright (c) 2008-2012, LSI Logic Corporation. # All rights reserved. # -# Copyright 2009 Sun Microsystems, Inc. All rights reserved. -# Use is subject to license terms. -# # # mr_sas.conf for sol 10 (and later) for all supported architectures # -# global definitions + +# MSI specific flag. Default is "yes". +# mrsas-enable-msi="yes"; +# Fast-Path specific flag. Default is "yes". +# mrsas-enable-fp="yes"; + diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/mr_sas.h --- a/usr/src/uts/common/io/mr_sas/mr_sas.h Wed Nov 07 14:05:17 2012 -0800 +++ b/usr/src/uts/common/io/mr_sas/mr_sas.h Wed Nov 07 15:57:25 2012 -0500 @@ -2,9 +2,17 @@ * mr_sas.h: header for mr_sas * * Solaris MegaRAID driver for SAS2.0 controllers - * Copyright (c) 2008-2009, LSI Logic Corporation. + * Copyright (c) 2008-2012, LSI Logic Corporation. * All rights reserved. * + * Version: + * Author: + * Swaminathan K S + * Arun Chandrashekhar + * Manju R + * Rasheed + * Shakeel Bukhari + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * @@ -36,6 +44,7 @@ /* * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. */ + #ifndef _MR_SAS_H_ #define _MR_SAS_H_ @@ -45,12 +54,13 @@ #include #include "mr_sas_list.h" +#include "ld_pd_map.h" /* * MegaRAID SAS2.0 Driver meta data */ -#define MRSAS_VERSION "LSIv2.7" -#define MRSAS_RELDATE "Apr 21, 2010" +#define MRSAS_VERSION "6.503.00.00ILLUMOS" +#define MRSAS_RELDATE "July 30, 2012" #define MRSAS_TRUE 1 #define MRSAS_FALSE 0 @@ -58,16 +68,30 @@ #define ADAPTER_RESET_NOT_REQUIRED 0 #define ADAPTER_RESET_REQUIRED 1 +#define PDSUPPORT 1 + /* * MegaRAID SAS2.0 device id conversion definitions. */ #define INST2LSIRDCTL(x) ((x) << INST_MINOR_SHIFT) +#define MRSAS_GET_BOUNDARY_ALIGNED_LEN(len, new_len, boundary_len) { \ + int rem; \ + rem = (len / boundary_len); \ + if ((rem * boundary_len) != len) { \ + new_len = len + ((rem + 1) * boundary_len - len); \ + } else { \ + new_len = len; \ + } \ +} + /* * MegaRAID SAS2.0 supported controllers */ #define PCI_DEVICE_ID_LSI_2108VDE 0x0078 #define PCI_DEVICE_ID_LSI_2108V 0x0079 +#define PCI_DEVICE_ID_LSI_TBOLT 0x005b +#define PCI_DEVICE_ID_LSI_INVADER 0x005d /* * Register Index for 2108 Controllers. 
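
The MRSAS_GET_BOUNDARY_ALIGNED_LEN macro introduced in the mr_sas.h hunk above rounds a transfer length up to the next multiple of boundary_len; the issue_mfi_smp()/issue_mfi_stp() ioctl paths earlier in this patch invoke it with PAGESIZE so that the DMA buffers they allocate are always a whole number of pages. A minimal user-space sketch of the macro's behavior, assuming a 4096-byte page purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Copied from the mr_sas.h hunk above; rounds len up to a multiple of boundary_len. */
#define MRSAS_GET_BOUNDARY_ALIGNED_LEN(len, new_len, boundary_len) { \
	int rem; \
	rem = (len / boundary_len); \
	if ((rem * boundary_len) != len) { \
		new_len = len + ((rem + 1) * boundary_len - len); \
	} else { \
		new_len = len; \
	} \
}

int
main(void)
{
	uint32_t lens[] = { 1, 4095, 4096, 4097, 8192 };
	uint32_t new_len;
	int i;

	for (i = 0; i < 5; i++) {
		MRSAS_GET_BOUNDARY_ALIGNED_LEN(lens[i], new_len, 4096);
		/* prints 4096, 4096, 4096, 8192, 8192 */
		(void) printf("%u -> %u\n", lens[i], new_len);
	}
	return (0);
}

The arithmetic reduces to new_len = roundup(len, boundary_len); since PAGESIZE is a power of two, (len + boundary_len - 1) & ~(boundary_len - 1) would compute the same value without the division.
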
@@ -75,6 +99,7 @@ #define REGISTER_SET_IO_2108 (2) #define MRSAS_MAX_SGE_CNT 0x50 +#define MRSAS_APP_RESERVED_CMDS 32 #define MRSAS_IOCTL_DRIVER 0x12341234 #define MRSAS_IOCTL_FIRMWARE 0x12345678 @@ -82,13 +107,50 @@ #define MRSAS_1_SECOND 1000000 +#ifdef PDSUPPORT + +#define UNCONFIGURED_GOOD 0x0 +#define PD_SYSTEM 0x40 +#define MR_EVT_PD_STATE_CHANGE 0x0072 +#define MR_EVT_PD_REMOVED_EXT 0x00f8 +#define MR_EVT_PD_INSERTED_EXT 0x00f7 +#define MR_DCMD_PD_GET_INFO 0x02020000 +#define MRSAS_TBOLT_PD_LUN 1 +#define MRSAS_TBOLT_PD_TGT_MAX 255 +#define MRSAS_TBOLT_GET_PD_MAX(s) ((s)->mr_tbolt_pd_max) + +#endif + +/* Raid Context Flags */ +#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4 +#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30 +typedef enum MR_RAID_FLAGS_IO_SUB_TYPE { + MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, + MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1 +} MR_RAID_FLAGS_IO_SUB_TYPE; + /* Dynamic Enumeration Flags */ -#define MRSAS_PD_LUN 1 #define MRSAS_LD_LUN 0 -#define MRSAS_PD_TGT_MAX 255 -#define MRSAS_GET_PD_MAX(s) ((s)->mr_pd_max) #define WWN_STRLEN 17 -#define APP_RESERVE_CMDS 32 +#define LD_SYNC_BIT 1 +#define LD_SYNC_SHIFT 14 +/* ThunderBolt (TB) specific */ +#define MRSAS_THUNDERBOLT_MSG_SIZE 256 +#define MRSAS_THUNDERBOLT_MAX_COMMANDS 1024 +#define MRSAS_THUNDERBOLT_MAX_REPLY_COUNT 1024 +#define MRSAS_THUNDERBOLT_REPLY_SIZE 8 +#define MRSAS_THUNDERBOLT_MAX_CHAIN_COUNT 1 + +#define MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0 +#define MPI2_FUNCTION_LD_IO_REQUEST 0xF1 + +#define MR_EVT_LD_FAST_PATH_IO_STATUS_CHANGED (0xFFFF) + +#define MR_INTERNAL_MFI_FRAMES_SMID 1 +#define MR_CTRL_EVENT_WAIT_SMID 2 +#define MR_INTERNAL_DRIVER_RESET_SMID 3 + + /* * ===================================== * MegaRAID SAS2.0 MFI firmware definitions @@ -103,19 +165,18 @@ /* * FW posts its state in upper 4 bits of outbound_msg_0 register */ -#define MFI_STATE_SHIFT 28 -#define MFI_STATE_MASK ((uint32_t)0xF<regmap_handle, \ (uint32_t *)((uintptr_t)(instance)->regmap + WRITE_SEQ_OFF), (v)) @@ -899,6 +1103,13 @@ #define WR_IB_DRWE(v, instance) ddi_put32((instance)->regmap_handle, \ (uint32_t *)((uintptr_t)(instance)->regmap + HOST_DIAG_OFF), (v)) +#define IB_LOW_QPORT 0xC0 +#define IB_HIGH_QPORT 0xC4 +#define OB_DOORBELL_REGISTER 0x9C /* 1078 implementation */ + +/* + * All MFI register set macros accept mrsas_register_set* + */ #define WR_IB_MSG_0(v, instance) ddi_put32((instance)->regmap_handle, \ (uint32_t *)((uintptr_t)(instance)->regmap + IB_MSG_0_OFF), (v)) @@ -933,6 +1144,56 @@ #define RD_OB_SCRATCH_PAD_0(instance) ddi_get32((instance)->regmap_handle, \ (uint32_t *)((uintptr_t)(instance)->regmap + OB_SCRATCH_PAD_0_OFF)) +/* Thunderbolt specific registers */ +#define RD_OB_SCRATCH_PAD_2(instance) ddi_get32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + OB_SCRATCH_PAD_2_OFF)) + +#define WR_TBOLT_IB_WRITE_SEQ(v, instance) \ + ddi_put32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + WRITE_TBOLT_SEQ_OFF), (v)) + +#define RD_TBOLT_HOST_DIAG(instance) ddi_get32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + HOST_TBOLT_DIAG_OFF)) + +#define WR_TBOLT_HOST_DIAG(v, instance) ddi_put32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + HOST_TBOLT_DIAG_OFF), (v)) + +#define RD_TBOLT_RESET_STAT(instance) ddi_get32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + RESET_TBOLT_STATUS_OFF)) + + +#define WR_MPI2_REPLY_POST_INDEX(v, instance)\ + ddi_put32((instance)->regmap_handle,\ 
+ (uint32_t *)\ + ((uintptr_t)(instance)->regmap + MPI2_REPLY_POST_HOST_INDEX_OFFSET),\ + (v)) + + +#define RD_MPI2_REPLY_POST_INDEX(instance)\ + ddi_get32((instance)->regmap_handle,\ + (uint32_t *)\ + ((uintptr_t)(instance)->regmap + MPI2_REPLY_POST_HOST_INDEX_OFFSET)) + +#define WR_IB_LOW_QPORT(v, instance) ddi_put32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT), (v)) + +#define WR_IB_HIGH_QPORT(v, instance) ddi_put32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT), (v)) + +#define WR_OB_DOORBELL_REGISTER_CLEAR(v, instance)\ + ddi_put32((instance)->regmap_handle,\ + (uint32_t *)((uintptr_t)(instance)->regmap + OB_DOORBELL_REGISTER), \ + (v)) + +#define WR_RESERVED0_REGISTER(v, instance) ddi_put32((instance)->regmap_handle,\ + (uint32_t *)((uintptr_t)(instance)->regmap + RESERVED0_REGISTER), \ + (v)) + +#define RD_RESERVED0_REGISTER(instance) ddi_get32((instance)->regmap_handle, \ + (uint32_t *)((uintptr_t)(instance)->regmap + RESERVED0_REGISTER)) + + + /* * When FW is in MFI_STATE_READY or MFI_STATE_OPERATIONAL, the state data * of Outbound Msg Reg 0 indicates max concurrent cmds supported, max SGEs @@ -948,6 +1209,9 @@ #define MFI_REPLY_2108_MESSAGE_INTR 0x00000001 #define MFI_REPLY_2108_MESSAGE_INTR_MASK 0x00000005 +/* Fusion interrupt mask */ +#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008) + #define MFI_POLL_TIMEOUT_SECS 60 #define MFI_ENABLE_INTR(instance) ddi_put32((instance)->regmap_handle, \ @@ -973,45 +1237,45 @@ * on_off_property of mrsas_ctrl_prop * bit0-9, 11-31 are reserved */ -#define DISABLE_OCR_PROP_FLAG 0x00000400 /* bit 10 */ +#define DISABLE_OCR_PROP_FLAG 0x00000400 /* bit 10 */ struct mrsas_register_set { - uint32_t reserved_0[4]; + uint32_t reserved_0[4]; /* 0000h */ - uint32_t inbound_msg_0; - uint32_t inbound_msg_1; - uint32_t outbound_msg_0; - uint32_t outbound_msg_1; + uint32_t inbound_msg_0; /* 0010h */ + uint32_t inbound_msg_1; /* 0014h */ + uint32_t outbound_msg_0; /* 0018h */ + uint32_t outbound_msg_1; /* 001Ch */ - uint32_t inbound_doorbell; - uint32_t inbound_intr_status; - uint32_t inbound_intr_mask; + uint32_t inbound_doorbell; /* 0020h */ + uint32_t inbound_intr_status; /* 0024h */ + uint32_t inbound_intr_mask; /* 0028h */ - uint32_t outbound_doorbell; - uint32_t outbound_intr_status; - uint32_t outbound_intr_mask; + uint32_t outbound_doorbell; /* 002Ch */ + uint32_t outbound_intr_status; /* 0030h */ + uint32_t outbound_intr_mask; /* 0034h */ - uint32_t reserved_1[2]; + uint32_t reserved_1[2]; /* 0038h */ - uint32_t inbound_queue_port; - uint32_t outbound_queue_port; + uint32_t inbound_queue_port; /* 0040h */ + uint32_t outbound_queue_port; /* 0044h */ - uint32_t reserved_2[22]; + uint32_t reserved_2[22]; /* 0048h */ - uint32_t outbound_doorbell_clear; + uint32_t outbound_doorbell_clear; /* 00A0h */ - uint32_t reserved_3[3]; + uint32_t reserved_3[3]; /* 00A4h */ - uint32_t outbound_scratch_pad; + uint32_t outbound_scratch_pad; /* 00B0h */ - uint32_t reserved_4[3]; + uint32_t reserved_4[3]; /* 00B4h */ - uint32_t inbound_low_queue_port; + uint32_t inbound_low_queue_port; /* 00C0h */ - uint32_t inbound_high_queue_port; + uint32_t inbound_high_queue_port; /* 00C4h */ - uint32_t reserved_5; - uint32_t index_registers[820]; + uint32_t reserved_5; /* 00C8h */ + uint32_t index_registers[820]; /* 00CCh */ }; struct mrsas_sge32 { @@ -1037,24 +1301,24 @@ }; struct mrsas_header { - uint8_t cmd; - uint8_t sense_len; - uint8_t cmd_status; - uint8_t scsi_status; + uint8_t cmd; 
/* 00h */ + uint8_t sense_len; /* 01h */ + uint8_t cmd_status; /* 02h */ + uint8_t scsi_status; /* 03h */ - uint8_t target_id; - uint8_t lun; - uint8_t cdb_len; - uint8_t sge_count; + uint8_t target_id; /* 04h */ + uint8_t lun; /* 05h */ + uint8_t cdb_len; /* 06h */ + uint8_t sge_count; /* 07h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - uint16_t timeout; - uint32_t data_xferlen; + uint16_t flags; /* 10h */ + uint16_t timeout; /* 12h */ + uint32_t data_xferlen; /* 14h */ }; union mrsas_sgl_frame { @@ -1063,198 +1327,199 @@ }; struct mrsas_init_frame { - uint8_t cmd; - uint8_t reserved_0; - uint8_t cmd_status; + uint8_t cmd; /* 00h */ + uint8_t reserved_0; /* 01h */ + uint8_t cmd_status; /* 02h */ - uint8_t reserved_1; - uint32_t reserved_2; + uint8_t reserved_1; /* 03h */ + uint32_t reserved_2; /* 04h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - uint16_t reserved_3; - uint32_t data_xfer_len; + uint16_t flags; /* 10h */ + uint16_t reserved_3; /* 12h */ + uint32_t data_xfer_len; /* 14h */ - uint32_t queue_info_new_phys_addr_lo; - uint32_t queue_info_new_phys_addr_hi; - uint32_t queue_info_old_phys_addr_lo; - uint32_t queue_info_old_phys_addr_hi; - - uint32_t reserved_4[6]; + uint32_t queue_info_new_phys_addr_lo; /* 18h */ + uint32_t queue_info_new_phys_addr_hi; /* 1Ch */ + uint32_t queue_info_old_phys_addr_lo; /* 20h */ + uint32_t queue_info_old_phys_addr_hi; /* 24h */ + uint64_t driverversion; /* 28h */ + uint32_t reserved_4[4]; /* 30h */ }; struct mrsas_init_queue_info { - uint32_t init_flags; - uint32_t reply_queue_entries; + uint32_t init_flags; /* 00h */ + uint32_t reply_queue_entries; /* 04h */ - uint32_t reply_queue_start_phys_addr_lo; - uint32_t reply_queue_start_phys_addr_hi; - uint32_t producer_index_phys_addr_lo; - uint32_t producer_index_phys_addr_hi; - uint32_t consumer_index_phys_addr_lo; - uint32_t consumer_index_phys_addr_hi; + uint32_t reply_queue_start_phys_addr_lo; /* 08h */ + uint32_t reply_queue_start_phys_addr_hi; /* 0Ch */ + uint32_t producer_index_phys_addr_lo; /* 10h */ + uint32_t producer_index_phys_addr_hi; /* 14h */ + uint32_t consumer_index_phys_addr_lo; /* 18h */ + uint32_t consumer_index_phys_addr_hi; /* 1Ch */ }; struct mrsas_io_frame { - uint8_t cmd; - uint8_t sense_len; - uint8_t cmd_status; - uint8_t scsi_status; + uint8_t cmd; /* 00h */ + uint8_t sense_len; /* 01h */ + uint8_t cmd_status; /* 02h */ + uint8_t scsi_status; /* 03h */ - uint8_t target_id; - uint8_t access_byte; - uint8_t reserved_0; - uint8_t sge_count; + uint8_t target_id; /* 04h */ + uint8_t access_byte; /* 05h */ + uint8_t reserved_0; /* 06h */ + uint8_t sge_count; /* 07h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - uint16_t timeout; - uint32_t lba_count; + uint16_t flags; /* 10h */ + uint16_t timeout; /* 12h */ + uint32_t lba_count; /* 14h */ - uint32_t sense_buf_phys_addr_lo; - uint32_t sense_buf_phys_addr_hi; + uint32_t sense_buf_phys_addr_lo; /* 18h */ + uint32_t sense_buf_phys_addr_hi; /* 1Ch */ - uint32_t start_lba_lo; - uint32_t start_lba_hi; + uint32_t start_lba_lo; /* 20h */ 
+ uint32_t start_lba_hi; /* 24h */ - union mrsas_sgl sgl; + union mrsas_sgl sgl; /* 28h */ }; struct mrsas_pthru_frame { - uint8_t cmd; - uint8_t sense_len; - uint8_t cmd_status; - uint8_t scsi_status; + uint8_t cmd; /* 00h */ + uint8_t sense_len; /* 01h */ + uint8_t cmd_status; /* 02h */ + uint8_t scsi_status; /* 03h */ - uint8_t target_id; - uint8_t lun; - uint8_t cdb_len; - uint8_t sge_count; + uint8_t target_id; /* 04h */ + uint8_t lun; /* 05h */ + uint8_t cdb_len; /* 06h */ + uint8_t sge_count; /* 07h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - uint16_t timeout; - uint32_t data_xfer_len; + uint16_t flags; /* 10h */ + uint16_t timeout; /* 12h */ + uint32_t data_xfer_len; /* 14h */ - uint32_t sense_buf_phys_addr_lo; - uint32_t sense_buf_phys_addr_hi; + uint32_t sense_buf_phys_addr_lo; /* 18h */ + uint32_t sense_buf_phys_addr_hi; /* 1Ch */ - uint8_t cdb[16]; - union mrsas_sgl sgl; + uint8_t cdb[16]; /* 20h */ + union mrsas_sgl sgl; /* 30h */ }; struct mrsas_dcmd_frame { - uint8_t cmd; - uint8_t reserved_0; - uint8_t cmd_status; - uint8_t reserved_1[4]; - uint8_t sge_count; + uint8_t cmd; /* 00h */ + uint8_t reserved_0; /* 01h */ + uint8_t cmd_status; /* 02h */ + uint8_t reserved_1[4]; /* 03h */ + uint8_t sge_count; /* 07h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - uint16_t timeout; + uint16_t flags; /* 10h */ + uint16_t timeout; /* 12h */ - uint32_t data_xfer_len; - uint32_t opcode; + uint32_t data_xfer_len; /* 14h */ + uint32_t opcode; /* 18h */ - union { + /* uint8_t mbox[DCMD_MBOX_SZ]; */ /* 1Ch */ + union { /* 1Ch */ uint8_t b[DCMD_MBOX_SZ]; uint16_t s[6]; uint32_t w[3]; } mbox; - union mrsas_sgl sgl; + union mrsas_sgl sgl; /* 28h */ }; struct mrsas_abort_frame { - uint8_t cmd; - uint8_t reserved_0; - uint8_t cmd_status; + uint8_t cmd; /* 00h */ + uint8_t reserved_0; /* 01h */ + uint8_t cmd_status; /* 02h */ - uint8_t reserved_1; - uint32_t reserved_2; + uint8_t reserved_1; /* 03h */ + uint32_t reserved_2; /* 04h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - uint16_t reserved_3; - uint32_t reserved_4; + uint16_t flags; /* 10h */ + uint16_t reserved_3; /* 12h */ + uint32_t reserved_4; /* 14h */ - uint32_t abort_context; - uint32_t pad_1; + uint32_t abort_context; /* 18h */ + uint32_t pad_1; /* 1Ch */ - uint32_t abort_mfi_phys_addr_lo; - uint32_t abort_mfi_phys_addr_hi; + uint32_t abort_mfi_phys_addr_lo; /* 20h */ + uint32_t abort_mfi_phys_addr_hi; /* 24h */ - uint32_t reserved_5[6]; + uint32_t reserved_5[6]; /* 28h */ }; struct mrsas_smp_frame { - uint8_t cmd; - uint8_t reserved_1; - uint8_t cmd_status; - uint8_t connection_status; + uint8_t cmd; /* 00h */ + uint8_t reserved_1; /* 01h */ + uint8_t cmd_status; /* 02h */ + uint8_t connection_status; /* 03h */ - uint8_t reserved_2[3]; - uint8_t sge_count; + uint8_t reserved_2[3]; /* 04h */ + uint8_t sge_count; /* 07h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - 
uint16_t timeout; + uint16_t flags; /* 10h */ + uint16_t timeout; /* 12h */ - uint32_t data_xfer_len; + uint32_t data_xfer_len; /* 14h */ - uint64_t sas_addr; + uint64_t sas_addr; /* 20h */ - union mrsas_sgl sgl[2]; + union mrsas_sgl sgl[2]; /* 28h */ }; struct mrsas_stp_frame { - uint8_t cmd; - uint8_t reserved_1; - uint8_t cmd_status; - uint8_t connection_status; + uint8_t cmd; /* 00h */ + uint8_t reserved_1; /* 01h */ + uint8_t cmd_status; /* 02h */ + uint8_t connection_status; /* 03h */ - uint8_t target_id; - uint8_t reserved_2[2]; - uint8_t sge_count; + uint8_t target_id; /* 04h */ + uint8_t reserved_2[2]; /* 04h */ + uint8_t sge_count; /* 07h */ - uint32_t context; - uint8_t req_id; - uint8_t msgvector; - uint16_t pad_0; + uint32_t context; /* 08h */ + uint8_t req_id; /* 0Ch */ + uint8_t msgvector; /* 0Dh */ + uint16_t pad_0; /* 0Eh */ - uint16_t flags; - uint16_t timeout; + uint16_t flags; /* 10h */ + uint16_t timeout; /* 12h */ - uint32_t data_xfer_len; + uint32_t data_xfer_len; /* 14h */ - uint16_t fis[10]; - uint32_t stp_flags; - union mrsas_sgl sgl; + uint16_t fis[10]; /* 28h */ + uint32_t stp_flags; /* 3C */ + union mrsas_sgl sgl; /* 40 */ }; union mrsas_frame { @@ -1681,144 +1946,109 @@ uint32_t seq_num; uint32_t class_locale_word; }; + #pragma pack() #ifndef DDI_VENDOR_LSI #define DDI_VENDOR_LSI "LSI" #endif /* DDI_VENDOR_LSI */ -#ifndef KMDB_MODULE -static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); -static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t); -#ifdef __sparc -static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t); -#else /* __sparc */ -static int mrsas_quiesce(dev_info_t *); -#endif /* __sparc */ -static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t); -static int mrsas_open(dev_t *, int, int, cred_t *); -static int mrsas_close(dev_t, int, int, cred_t *); -static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); +int mrsas_config_scsi_device(struct mrsas_instance *, + struct scsi_device *, dev_info_t **); + +#ifdef PDSUPPORT +int mrsas_tbolt_config_pd(struct mrsas_instance *, uint16_t, + uint8_t, dev_info_t **); +#endif -static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *, - scsi_hba_tran_t *, struct scsi_device *); -static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register +dev_info_t *mrsas_find_child(struct mrsas_instance *, uint16_t, uint8_t); +int mrsas_service_evt(struct mrsas_instance *, int, int, int, uint64_t); +void return_raid_msg_pkt(struct mrsas_instance *, struct mrsas_cmd *); +struct mrsas_cmd *get_raid_msg_mfi_pkt(struct mrsas_instance *); +void return_raid_msg_mfi_pkt(struct mrsas_instance *, struct mrsas_cmd *); + +int alloc_space_for_mpi2(struct mrsas_instance *); +void fill_up_drv_ver(struct mrsas_drv_ver *dv); + +int mrsas_issue_init_mpi2(struct mrsas_instance *); +struct scsi_pkt *mrsas_tbolt_tran_init_pkt(struct scsi_address *, register struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t); -static int mrsas_tran_start(struct scsi_address *, +int mrsas_tbolt_tran_start(struct scsi_address *, register struct scsi_pkt *); -static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *); -static int mrsas_tran_reset(struct scsi_address *, int); -static int mrsas_tran_getcap(struct scsi_address *, char *, int); -static int mrsas_tran_setcap(struct scsi_address *, char *, int, int); -static void mrsas_tran_destroy_pkt(struct scsi_address *, - struct scsi_pkt *); -static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *); -static void 
mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *); -static uint_t mrsas_isr(); -static uint_t mrsas_softintr(); - -static int init_mfi(struct mrsas_instance *); -static int mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t); -static int mrsas_alloc_dma_obj(struct mrsas_instance *, dma_obj_t *, - uchar_t); -static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *); -static void return_mfi_pkt(struct mrsas_instance *, +uint32_t tbolt_read_fw_status_reg(struct mrsas_instance *); +void tbolt_issue_cmd(struct mrsas_cmd *, struct mrsas_instance *); +int tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *, + struct mrsas_cmd *); +int tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *, struct mrsas_cmd *); - -static void free_space_for_mfi(struct mrsas_instance *); -static void free_additional_dma_buffer(struct mrsas_instance *); -static int alloc_additional_dma_buffer(struct mrsas_instance *); -static int read_fw_status_reg_ppc(struct mrsas_instance *); -static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *); -static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *, - struct mrsas_cmd *); -static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *, - struct mrsas_cmd *); -static void enable_intr_ppc(struct mrsas_instance *); -static void disable_intr_ppc(struct mrsas_instance *); -static int intr_ack_ppc(struct mrsas_instance *); -static int mfi_state_transition_to_ready(struct mrsas_instance *); -static void destroy_mfi_frame_pool(struct mrsas_instance *); -static int create_mfi_frame_pool(struct mrsas_instance *); -static int mrsas_dma_alloc(struct mrsas_instance *, struct scsi_pkt *, +void tbolt_enable_intr(struct mrsas_instance *); +void tbolt_disable_intr(struct mrsas_instance *); +int tbolt_intr_ack(struct mrsas_instance *); +uint_t mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *); + uint_t tbolt_softintr(); +int mrsas_tbolt_dma(struct mrsas_instance *, uint32_t, int, int (*)()); +int mrsas_check_dma_handle(ddi_dma_handle_t handle); +int mrsas_check_acc_handle(ddi_acc_handle_t handle); +int mrsas_dma_alloc(struct mrsas_instance *, struct scsi_pkt *, struct buf *, int, int (*)()); -static int mrsas_dma_move(struct mrsas_instance *, +int mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *); -static void flush_cache(struct mrsas_instance *instance); -static void display_scsi_inquiry(caddr_t); -static int start_mfi_aen(struct mrsas_instance *instance); -static int handle_drv_ioctl(struct mrsas_instance *instance, - struct mrsas_ioctl *ioctl, int mode); -static int handle_mfi_ioctl(struct mrsas_instance *instance, - struct mrsas_ioctl *ioctl, int mode); -static int handle_mfi_aen(struct mrsas_instance *instance, - struct mrsas_aen *aen); -static void fill_up_drv_ver(struct mrsas_drv_ver *dv); -static struct mrsas_cmd *build_cmd(struct mrsas_instance *instance, - struct scsi_address *ap, struct scsi_pkt *pkt, - uchar_t *cmd_done); -#ifndef __sparc -static int wait_for_outstanding(struct mrsas_instance *instance); -#endif /* __sparc */ -static int register_mfi_aen(struct mrsas_instance *instance, - uint32_t seq_num, uint32_t class_locale_word); -static int issue_mfi_pthru(struct mrsas_instance *instance, struct - mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); -static int issue_mfi_dcmd(struct mrsas_instance *instance, struct - mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); -static int issue_mfi_smp(struct mrsas_instance *instance, struct - mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); -static int 
issue_mfi_stp(struct mrsas_instance *instance, struct - mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); -static int abort_aen_cmd(struct mrsas_instance *instance, - struct mrsas_cmd *cmd_to_abort); +int mrsas_alloc_dma_obj(struct mrsas_instance *, dma_obj_t *, + uchar_t); +void mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *, struct mrsas_cmd *); +int mrsas_dma_alloc_dmd(struct mrsas_instance *, dma_obj_t *); +void tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *, + struct mrsas_cmd *); +int alloc_req_rep_desc(struct mrsas_instance *); +int mrsas_mode_sense_build(struct scsi_pkt *); +void push_pending_mfi_pkt(struct mrsas_instance *, + struct mrsas_cmd *); +int mrsas_issue_pending_cmds(struct mrsas_instance *); +int mrsas_print_pending_cmds(struct mrsas_instance *); +int mrsas_complete_pending_cmds(struct mrsas_instance *); -static int mrsas_common_check(struct mrsas_instance *instance, - struct mrsas_cmd *cmd); -static void mrsas_fm_init(struct mrsas_instance *instance); -static void mrsas_fm_fini(struct mrsas_instance *instance); -static int mrsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, - const void *); -static void mrsas_fm_ereport(struct mrsas_instance *instance, - char *detail); -static int mrsas_check_dma_handle(ddi_dma_handle_t handle); -static int mrsas_check_acc_handle(ddi_acc_handle_t handle); - -static void mrsas_rem_intrs(struct mrsas_instance *instance); -static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type); +int create_mfi_frame_pool(struct mrsas_instance *); +void destroy_mfi_frame_pool(struct mrsas_instance *); +int create_mfi_mpi_frame_pool(struct mrsas_instance *); +void destroy_mfi_mpi_frame_pool(struct mrsas_instance *); +int create_mpi2_frame_pool(struct mrsas_instance *); +void destroy_mpi2_frame_pool(struct mrsas_instance *); +int mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t); +void mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *); +void free_req_desc_pool(struct mrsas_instance *); +void free_space_for_mpi2(struct mrsas_instance *); +void mrsas_dump_reply_desc(struct mrsas_instance *); +void tbolt_complete_cmd(struct mrsas_instance *, struct mrsas_cmd *); +void display_scsi_inquiry(caddr_t); +void service_mfi_aen(struct mrsas_instance *, struct mrsas_cmd *); +int mrsas_mode_sense_build(struct scsi_pkt *); +int mrsas_tbolt_get_ld_map_info(struct mrsas_instance *); +struct mrsas_cmd *mrsas_tbolt_build_poll_cmd(struct mrsas_instance *, + struct scsi_address *, struct scsi_pkt *, uchar_t *); +int mrsas_tbolt_reset_ppc(struct mrsas_instance *instance); +void mrsas_tbolt_kill_adapter(struct mrsas_instance *instance); +int abort_syncmap_cmd(struct mrsas_instance *, struct mrsas_cmd *); +void mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[], + struct IO_REQUEST_INFO *, Mpi2RaidSCSIIORequest_t *, U32); -static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *, - scsi_hba_tran_t *, struct scsi_device *); -static int mrsas_tran_bus_config(dev_info_t *, uint_t, - ddi_bus_config_op_t, void *, dev_info_t **); -static int mrsas_parse_devname(char *, int *, int *); -static int mrsas_config_all_devices(struct mrsas_instance *); -static int mrsas_config_scsi_device(struct mrsas_instance *, - struct scsi_device *, dev_info_t **); -static int mrsas_config_ld(struct mrsas_instance *, uint16_t, - uint8_t, dev_info_t **); -static dev_info_t *mrsas_find_child(struct mrsas_instance *, uint16_t, - uint8_t); -static int mrsas_name_node(dev_info_t *, char *, int); -static void mrsas_issue_evt_taskq(struct 
mrsas_eventinfo *); -static int mrsas_service_evt(struct mrsas_instance *, int, int, int, - uint64_t); -static int mrsas_mode_sense_build(struct scsi_pkt *); -static void push_pending_mfi_pkt(struct mrsas_instance *, - struct mrsas_cmd *); -static int mrsas_issue_init_mfi(struct mrsas_instance *); -static int mrsas_issue_pending_cmds(struct mrsas_instance *); -static int mrsas_print_pending_cmds(struct mrsas_instance *); -static int mrsas_complete_pending_cmds(struct mrsas_instance *); -static int mrsas_reset_ppc(struct mrsas_instance *); -static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *); -static int mrsas_kill_adapter(struct mrsas_instance *); -static void io_timeout_checker(void *instance); -static void complete_cmd_in_sync_mode(struct mrsas_instance *, - struct mrsas_cmd *); + +int mrsas_init_adapter_ppc(struct mrsas_instance *instance); +int mrsas_init_adapter_tbolt(struct mrsas_instance *instance); +int mrsas_init_adapter(struct mrsas_instance *instance); + +int mrsas_alloc_cmd_pool(struct mrsas_instance *instance); +void mrsas_free_cmd_pool(struct mrsas_instance *instance); -#endif /* KMDB_MODULE */ +void mrsas_print_cmd_details(struct mrsas_instance *, struct mrsas_cmd *, int); +struct mrsas_cmd *get_raid_msg_pkt(struct mrsas_instance *); + +int mfi_state_transition_to_ready(struct mrsas_instance *); + + +/* FMA functions. */ +int mrsas_common_check(struct mrsas_instance *, struct mrsas_cmd *); +void mrsas_fm_ereport(struct mrsas_instance *, char *); #ifdef __cplusplus diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/mr_sas_list.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/usr/src/uts/common/io/mr_sas/mr_sas_list.c Wed Nov 07 15:57:25 2012 -0500 @@ -0,0 +1,118 @@ +/* + * mr_sas_list.h: header for mr_sas + * + * Solaris MegaRAID driver for SAS2.0 controllers + * Copyright (c) 2008-2012, LSI Logic Corporation. + * All rights reserved. + */ + +/* Copyright 2012 Nexenta Systems, Inc. All rights reserved. */ + +/* + * Extract C functions from LSI-provided mr_sas_list.h such that we can both + * be lint-clean and provide a slightly better source organizational model + * beyond preprocessor abuse. + */ + +#include "mr_sas_list.h" + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void +__list_add(struct mlist_head *new, struct mlist_head *prev, + struct mlist_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} + +/* + * mlist_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +void +mlist_add(struct mlist_head *new, struct mlist_head *head) +{ + __list_add(new, head, head->next); +} + +/* + * mlist_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +void +mlist_add_tail(struct mlist_head *new, struct mlist_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! 
+ */ +static inline void +__list_del(struct mlist_head *prev, struct mlist_head *next) +{ + next->prev = prev; + prev->next = next; +} + +/* + * mlist_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + */ +void +mlist_del_init(struct mlist_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +/* + * mlist_empty - tests whether a list is empty + * @head: the list to test. + */ +int +mlist_empty(struct mlist_head *head) +{ + return (head->next == head); +} + +/* + * mlist_splice - join two lists + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +void +mlist_splice(struct mlist_head *list, struct mlist_head *head) +{ + struct mlist_head *first = list->next; + + if (first != list) { + struct mlist_head *last = list->prev; + struct mlist_head *at = head->next; + + first->prev = head; + head->next = first; + + last->next = at; + at->prev = last; + } +} diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/mr_sas_list.h --- a/usr/src/uts/common/io/mr_sas/mr_sas_list.h Wed Nov 07 14:05:17 2012 -0800 +++ b/usr/src/uts/common/io/mr_sas/mr_sas_list.h Wed Nov 07 15:57:25 2012 -0500 @@ -2,7 +2,7 @@ * mr_sas_list.h: header for mr_sas * * Solaris MegaRAID driver for SAS2.0 controllers - * Copyright (c) 2008-2009, LSI Logic Corporation. + * Copyright (c) 2008-2012, LSI Logic Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,11 +33,6 @@ * DAMAGE. */ -/* - * Copyright 2009 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms. - */ - #ifndef _MR_SAS_LIST_H_ #define _MR_SAS_LIST_H_ @@ -70,110 +65,12 @@ (ptr)->next = (ptr); (ptr)->prev = (ptr); \ } -#ifndef KMDB_MODULE -/* - * Insert a new entry between two known consecutive entries. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static void __list_add(struct mlist_head *new, - struct mlist_head *prev, - struct mlist_head *next) -{ - next->prev = new; - new->next = next; - new->prev = prev; - prev->next = new; -} - -/* - * mlist_add - add a new entry - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - */ -static void mlist_add(struct mlist_head *new, struct mlist_head *head) -{ - __list_add(new, head, head->next); -} - - -/* - * mlist_add_tail - add a new entry - * @new: new entry to be added - * @head: list head to add it before - * - * Insert a new entry before the specified head. - * This is useful for implementing queues. - */ -static void mlist_add_tail(struct mlist_head *new, struct mlist_head *head) -{ - __list_add(new, head->prev, head); -} - - - -/* - * Delete a list entry by making the prev/next entries - * point to each other. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static void __list_del(struct mlist_head *prev, - struct mlist_head *next) -{ - next->prev = prev; - prev->next = next; -} - - -/* - * mlist_del_init - deletes entry from list and reinitialize it. - * @entry: the element to delete from the list. - */ -static void mlist_del_init(struct mlist_head *entry) -{ - __list_del(entry->prev, entry->next); - INIT_LIST_HEAD(entry); -} - - -/* - * mlist_empty - tests whether a list is empty - * @head: the list to test. 
- */ -static int mlist_empty(struct mlist_head *head) -{ - return (head->next == head); -} - - -/* - * mlist_splice - join two lists - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static void mlist_splice(struct mlist_head *list, struct mlist_head *head) -{ - struct mlist_head *first = list->next; - - if (first != list) { - struct mlist_head *last = list->prev; - struct mlist_head *at = head->next; - - first->prev = head; - head->next = first; - - last->next = at; - at->prev = last; - } -} -#endif /* KMDB_MODULE */ +void mlist_add(struct mlist_head *, struct mlist_head *); +void mlist_add_tail(struct mlist_head *, struct mlist_head *); +void mlist_del_init(struct mlist_head *); +int mlist_empty(struct mlist_head *); +void mlist_splice(struct mlist_head *, struct mlist_head *); /* * mlist_entry - get the struct for this entry diff -r 4eac7a87eff2 -r 267f693f357e usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c Wed Nov 07 15:57:25 2012 -0500 @@ -0,0 +1,3776 @@ +/* + * mr_sas_tbolt.c: source for mr_sas driver for New Generation. + * i.e. Thunderbolt and Invader + * + * Solaris MegaRAID device driver for SAS2.0 controllers + * Copyright (c) 2008-2012, LSI Logic Corporation. + * All rights reserved. + * + * Version: + * Author: + * Swaminathan K S + * Arun Chandrashekhar + * Manju R + * Rasheed + * Shakeel Bukhari + */ + + +#include +#include +#include +#include +#include +#include "ld_pd_map.h" +#include "mr_sas.h" +#include "fusion.h" + +/* + * FMA header files + */ +#include +#include +#include +#include + + +/* Pre-TB command size and TB command size. */ +#define MR_COMMAND_SIZE (64*20) /* 1280 bytes */ +MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map); +U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map); +U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map); +U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *); +extern ddi_dma_attr_t mrsas_generic_dma_attr; +extern uint32_t mrsas_tbolt_max_cap_maxxfer; +extern struct ddi_device_acc_attr endian_attr; +extern int debug_level_g; +extern unsigned int enable_fp; +volatile int dump_io_wait_time = 90; +extern void +io_timeout_checker(void *arg); +extern volatile int debug_timeout_g; +extern int mrsas_issue_pending_cmds(struct mrsas_instance *); +extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance); +extern void push_pending_mfi_pkt(struct mrsas_instance *, + struct mrsas_cmd *); +extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *, + MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *); + +/* Local static prototypes. 
 */
+static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
+    struct scsi_address *, struct scsi_pkt *, uchar_t *);
+static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
+    U64 start_blk, U32 num_blocks);
+static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
+static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
+static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
+static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
+#ifdef PDSUPPORT
+static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
+    struct mrsas_tbolt_pd_info *, int);
+#endif /* PDSUPPORT */
+
+static int debug_tbolt_fw_faults_after_ocr_g = 0;
+
+/*
+ * destroy_mfi_mpi_frame_pool
+ */
+void
+destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
+{
+	int i;
+	struct mrsas_cmd *cmd;
+
+	/* return all mfi frames to pool */
+	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
+		cmd = instance->cmd_list[i];
+		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
+			(void) mrsas_free_dma_obj(instance,
+			    cmd->frame_dma_obj);
+		}
+		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
+	}
+}
+
+/*
+ * destroy_mpi2_frame_pool
+ */
+void
+destroy_mpi2_frame_pool(struct mrsas_instance *instance)
+{
+	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
+		(void) mrsas_free_dma_obj(instance,
+		    instance->mpi2_frame_pool_dma_obj);
+		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
+	}
+}
+
+
+/*
+ * mrsas_tbolt_free_additional_dma_buffer
+ */
+void
+mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
+{
+	int i;
+
+	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
+		(void) mrsas_free_dma_obj(instance,
+		    instance->mfi_internal_dma_obj);
+		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
+	}
+	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
+		(void) mrsas_free_dma_obj(instance,
+		    instance->mfi_evt_detail_obj);
+		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
+			(void) mrsas_free_dma_obj(instance,
+			    instance->ld_map_obj[i]);
+			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
+		}
+	}
+}
+
+
+/*
+ * free_req_rep_desc_pool
+ */
+void
+free_req_rep_desc_pool(struct mrsas_instance *instance)
+{
+	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
+		(void) mrsas_free_dma_obj(instance,
+		    instance->request_desc_dma_obj);
+		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
+	}
+
+	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
+		(void) mrsas_free_dma_obj(instance,
+		    instance->reply_desc_dma_obj);
+		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
+	}
+}
+
+
+/*
+ * ThunderBolt(TB) Request Message Frame Pool
+ */
+int
+create_mpi2_frame_pool(struct mrsas_instance *instance)
+{
+	int i = 0;
+	uint16_t max_cmd;
+	uint32_t sgl_sz;
+	uint32_t raid_msg_size;
+	uint32_t total_size;
+	uint32_t offset;
+	uint32_t io_req_base_phys;
+	uint8_t *io_req_base;
+	struct mrsas_cmd *cmd;
+
+	max_cmd = instance->max_fw_cmds;
+
+	sgl_sz = 1024;
+	raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
+
+	/* Allocating additional 256 bytes to accommodate SMID 0.
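+	 * The pool is carved into four consecutive regions:
+	 *
+	 *   [pad for SMID 0][max_cmd message frames][max_cmd 1KB SGL
+	 *   chain buffers][max_cmd sense buffers]
+	 *
+	 * so each command's frame, chain SGL and sense buffer are all
+	 * found at fixed offsets from the single DMA cookie obtained below.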
*/ + total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) + + (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH); + + con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: " + "max_cmd %x", max_cmd)); + + con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: " + "request message frame pool size %x", total_size)); + + /* + * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory + * and then split the memory to 1024 commands. Each command should be + * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME + * within it. Further refer the "alloc_req_rep_desc" function where + * we allocate request/reply descriptors queues for a clue. + */ + + instance->mpi2_frame_pool_dma_obj.size = total_size; + instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr; + instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi = + 0xFFFFFFFFU; + instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max = + 0xFFFFFFFFU; + instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1; + instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256; + + if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, + "mr_sas: could not alloc mpi2 frame pool"); + return (DDI_FAILURE); + } + + bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size); + instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED; + + instance->io_request_frames = + (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer; + instance->io_request_frames_phy = + (uint32_t) + instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address; + + con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p", + (void *)instance->io_request_frames)); + + con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x", + instance->io_request_frames_phy)); + + io_req_base = (uint8_t *)instance->io_request_frames + + MRSAS_THUNDERBOLT_MSG_SIZE; + io_req_base_phys = instance->io_request_frames_phy + + MRSAS_THUNDERBOLT_MSG_SIZE; + + con_log(CL_DLEVEL3, (CE_NOTE, + "io req_base_phys 0x%x", io_req_base_phys)); + + for (i = 0; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + + offset = i * MRSAS_THUNDERBOLT_MSG_SIZE; + + cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *) + ((uint8_t *)io_req_base + offset); + cmd->scsi_io_request_phys_addr = io_req_base_phys + offset; + + cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base + + (max_cmd * raid_msg_size) + i * sgl_sz); + + cmd->sgl_phys_addr = (io_req_base_phys + + (max_cmd * raid_msg_size) + i * sgl_sz); + + cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base + + (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) + + (i * SENSE_LENGTH)); + + cmd->sense_phys_addr1 = (io_req_base_phys + + (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) + + (i * SENSE_LENGTH)); + + + cmd->SMID = i + 1; + + con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p", + cmd->index, (void *)cmd->scsi_io_request)); + + con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x", + cmd->index, cmd->scsi_io_request_phys_addr)); + + con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p", + cmd->index, (void *)cmd->sense1)); + + con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x", + cmd->index, cmd->sense_phys_addr1)); + + con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p", + cmd->index, (void *)cmd->sgl)); + + con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x", + cmd->index, cmd->sgl_phys_addr)); + } + + return (DDI_SUCCESS); + +} + + +/* + * alloc_additional_dma_buffer for AEN + */ +int 
+mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance) +{ + uint32_t internal_buf_size = PAGESIZE*2; + int i; + + /* Initialize buffer status as free */ + instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; + instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED; + instance->ld_map_obj[0].status = DMA_OBJ_FREED; + instance->ld_map_obj[1].status = DMA_OBJ_FREED; + + + instance->mfi_internal_dma_obj.size = internal_buf_size; + instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr; + instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = + 0xFFFFFFFFU; + instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1; + + if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, + "mr_sas: could not alloc reply queue"); + return (DDI_FAILURE); + } + + bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size); + + instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED; + instance->internal_buf = + (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer)); + instance->internal_buf_dmac_add = + instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address; + instance->internal_buf_size = internal_buf_size; + + /* allocate evt_detail */ + instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail); + instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr; + instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; + instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1; + instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8; + + if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: " + "could not allocate data transfer buffer."); + goto fail_tbolt_additional_buff; + } + + bzero(instance->mfi_evt_detail_obj.buffer, + sizeof (struct mrsas_evt_detail)); + + instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED; + + instance->size_map_info = sizeof (MR_FW_RAID_MAP) + + (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1)); + + for (i = 0; i < 2; i++) { + /* allocate the data transfer buffer */ + instance->ld_map_obj[i].size = instance->size_map_info; + instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr; + instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + instance->ld_map_obj[i].dma_attr.dma_attr_count_max = + 0xFFFFFFFFU; + instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1; + instance->ld_map_obj[i].dma_attr.dma_attr_align = 1; + + if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i], + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, + "could not allocate data transfer buffer."); + goto fail_tbolt_additional_buff; + } + + instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED; + + bzero(instance->ld_map_obj[i].buffer, instance->size_map_info); + + instance->ld_map[i] = + (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer; + instance->ld_map_phy[i] = (uint32_t)instance-> + ld_map_obj[i].dma_cookie[0].dmac_address; + + con_log(CL_DLEVEL3, (CE_NOTE, + "ld_map Addr Phys 0x%x", instance->ld_map_phy[i])); + + con_log(CL_DLEVEL3, (CE_NOTE, + "size_map_info 0x%x", instance->size_map_info)); + } + + return (DDI_SUCCESS); + +fail_tbolt_additional_buff: + mrsas_tbolt_free_additional_dma_buffer(instance); + + return (DDI_FAILURE); +} + 
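+/*
+ * A minimal user-space sketch of the double buffering set up above; all
+ * names in it (toy_map, main) are illustrative, not driver symbols. The
+ * driver consumes ld_map[map_id & 1] while a firmware RAID-map update is
+ * synced into the other copy, and bumping map_id flips which copy
+ * readers select:
+ *
+ *	#include <stdio.h>
+ *
+ *	struct toy_map { unsigned int generation; };
+ *
+ *	int
+ *	main(void)
+ *	{
+ *		struct toy_map map[2] = { { 0 }, { 0 } };
+ *		unsigned int map_id = 0;
+ *		int i;
+ *
+ *		for (i = 0; i < 4; i++) {
+ *			map[(map_id + 1) & 1].generation = map_id + 1;
+ *			map_id++;
+ *			printf("active copy %u, generation %u\n",
+ *			    map_id & 1, map[map_id & 1].generation);
+ *		}
+ *		return (0);
+ *	}
+ */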
+MRSAS_REQUEST_DESCRIPTOR_UNION * +mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index) +{ + MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; + + if (index > instance->max_fw_cmds) { + con_log(CL_ANN1, (CE_NOTE, + "Invalid SMID 0x%x request for descriptor", index)); + con_log(CL_ANN1, (CE_NOTE, + "max_fw_cmds : 0x%x", instance->max_fw_cmds)); + return (NULL); + } + + req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *) + ((char *)instance->request_message_pool + + (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index)); + + con_log(CL_ANN1, (CE_NOTE, + "request descriptor : 0x%08lx", (unsigned long)req_desc)); + + con_log(CL_ANN1, (CE_NOTE, + "request descriptor base phy : 0x%08lx", + (unsigned long)instance->request_message_pool_phy)); + + return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc); +} + + +/* + * Allocate Request and Reply Queue Descriptors. + */ +int +alloc_req_rep_desc(struct mrsas_instance *instance) +{ + uint32_t request_q_sz, reply_q_sz; + int i, max_reply_q_sz; + MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; + + /* + * ThunderBolt(TB) There's no longer producer consumer mechanism. + * Once we have an interrupt we are supposed to scan through the list of + * reply descriptors and process them accordingly. We would be needing + * to allocate memory for 1024 reply descriptors + */ + + /* Allocate Reply Descriptors */ + con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x", + (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION))); + + /* reply queue size should be multiple of 16 */ + max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16; + + reply_q_sz = 8 * max_reply_q_sz; + + + con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x", + (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION))); + + instance->reply_desc_dma_obj.size = reply_q_sz; + instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr; + instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; + instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1; + instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16; + + if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, + "mr_sas: could not alloc reply queue"); + return (DDI_FAILURE); + } + + bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz); + instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED; + + /* virtual address of reply queue */ + instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)( + instance->reply_desc_dma_obj.buffer); + + instance->reply_q_depth = max_reply_q_sz; + + con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x", + instance->reply_q_depth)); + + con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p", + (void *)instance->reply_frame_pool)); + + /* initializing reply address to 0xFFFFFFFF */ + reply_desc = instance->reply_frame_pool; + + for (i = 0; i < instance->reply_q_depth; i++) { + reply_desc->Words = (uint64_t)~0; + reply_desc++; + } + + + instance->reply_frame_pool_phy = + (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address; + + con_log(CL_ANN1, (CE_NOTE, + "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy)); + + + instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy + + reply_q_sz); + + con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x", + instance->reply_pool_limit_phy)); + + + con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x", + (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION))); + + /* Allocate Request Descriptors 
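+ *
+ * (worked example: if the firmware reports more than 1008 commands,
+ * mrsas_init_adapter_tbolt() below caps max_fw_cmds at 1007; the reply
+ * queue depth then rounds to ((1007 + 1 + 15) / 16) * 16 = 1008 slots
+ * of 8 bytes each, i.e. 8064 bytes, and this request queue needs
+ * 8 * 1007 = 8056 bytes)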
*/ + con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x", + (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION))); + + request_q_sz = 8 * + (instance->max_fw_cmds); + + instance->request_desc_dma_obj.size = request_q_sz; + instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr; + instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = + 0xFFFFFFFFU; + instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1; + instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16; + + if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, + "mr_sas: could not alloc request queue desc"); + goto fail_undo_reply_queue; + } + + bzero(instance->request_desc_dma_obj.buffer, request_q_sz); + instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED; + + /* virtual address of request queue desc */ + instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *) + (instance->request_desc_dma_obj.buffer); + + instance->request_message_pool_phy = + (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address; + + return (DDI_SUCCESS); + +fail_undo_reply_queue: + if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) { + (void) mrsas_free_dma_obj(instance, + instance->reply_desc_dma_obj); + instance->reply_desc_dma_obj.status = DMA_OBJ_FREED; + } + + return (DDI_FAILURE); +} + +/* + * mrsas_alloc_cmd_pool_tbolt + * + * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single + * routine + */ +int +mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance) +{ + int i; + int count; + uint32_t max_cmd; + uint32_t reserve_cmd; + size_t sz; + + struct mrsas_cmd *cmd; + + max_cmd = instance->max_fw_cmds; + con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: " + "max_cmd %x", max_cmd)); + + + sz = sizeof (struct mrsas_cmd *) * max_cmd; + + /* + * instance->cmd_list is an array of struct mrsas_cmd pointers. + * Allocate the dynamic array first and then allocate individual + * commands. 
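+	 * (e.g. with max_cmd = 1007 this is one 1007-entry pointer array
+	 * plus 1007 separate struct mrsas_cmd allocations, so an
+	 * individual command can be freed or replaced without
+	 * reallocating the array)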
+ */ + instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); + + /* create a frame pool and assign one frame to each cmd */ + for (count = 0; count < max_cmd; count++) { + instance->cmd_list[count] = + kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP); + } + + /* add all the commands to command pool */ + + INIT_LIST_HEAD(&instance->cmd_pool_list); + INIT_LIST_HEAD(&instance->cmd_pend_list); + INIT_LIST_HEAD(&instance->cmd_app_pool_list); + + reserve_cmd = MRSAS_APP_RESERVED_CMDS; + + /* cmd index 0 reservered for IOC INIT */ + for (i = 1; i < reserve_cmd; i++) { + cmd = instance->cmd_list[i]; + cmd->index = i; + mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list); + } + + + for (i = reserve_cmd; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + cmd->index = i; + mlist_add_tail(&cmd->list, &instance->cmd_pool_list); + } + + return (DDI_SUCCESS); + +mrsas_undo_cmds: + if (count > 0) { + /* free each cmd */ + for (i = 0; i < count; i++) { + if (instance->cmd_list[i] != NULL) { + kmem_free(instance->cmd_list[i], + sizeof (struct mrsas_cmd)); + } + instance->cmd_list[i] = NULL; + } + } + +mrsas_undo_cmd_list: + if (instance->cmd_list != NULL) + kmem_free(instance->cmd_list, sz); + instance->cmd_list = NULL; + + return (DDI_FAILURE); +} + + +/* + * free_space_for_mpi2 + */ +void +free_space_for_mpi2(struct mrsas_instance *instance) +{ + /* already freed */ + if (instance->cmd_list == NULL) { + return; + } + + /* First free the additional DMA buffer */ + mrsas_tbolt_free_additional_dma_buffer(instance); + + /* Free the request/reply descriptor pool */ + free_req_rep_desc_pool(instance); + + /* Free the MPI message pool */ + destroy_mpi2_frame_pool(instance); + + /* Free the MFI frame pool */ + destroy_mfi_frame_pool(instance); + + /* Free all the commands in the cmd_list */ + /* Free the cmd_list buffer itself */ + mrsas_free_cmd_pool(instance); +} + + +/* + * ThunderBolt(TB) memory allocations for commands/messages/frames. 
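+ *
+ * (SGE budget sketch, assuming the 256-byte MRSAS_THUNDERBOLT_MSG_SIZE
+ * implied by the frame-pool comment above and 16-byte IEEE SGEs: the
+ * per-command chain buffer contributes (MR_COMMAND_SIZE - 256) / 16 =
+ * (1280 - 256) / 16 = 64 chain SGEs beyond those that fit in the main
+ * frame; note that the code below actually subtracts 2, not 1, from
+ * the combined total)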
+ */
+int
+alloc_space_for_mpi2(struct mrsas_instance *instance)
+{
+	/* Allocate command pool (memory for cmd_list & individual commands) */
+	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
+		cmn_err(CE_WARN, "Error creating cmd pool");
+		return (DDI_FAILURE);
+	}
+
+	/* Initialize single reply size and Message size */
+	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
+	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
+
+	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
+	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
+	    sizeof (MPI2_SGE_IO_UNION))) / sizeof (MPI2_SGE_IO_UNION);
+	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
+	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
+
+	/* Reduce SG count by 1 to take care of group cmds feature in FW */
+	instance->max_num_sge = (instance->max_sge_in_main_msg +
+	    instance->max_sge_in_chain - 2);
+	instance->chain_offset_mpt_msg =
+	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
+	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
+	    sizeof (MPI2_SGE_IO_UNION)) / 16;
+	instance->reply_read_index = 0;
+
+	/* Allocate Request and Reply descriptors Array */
+	/* Make sure the buffer is aligned to 8 for req/rep descriptor pool */
+	if (alloc_req_rep_desc(instance)) {
+		cmn_err(CE_WARN,
+		    "Error, allocating memory for descriptor-pool");
+		goto mpi2_undo_cmd_pool;
+	}
+	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
+	    instance->request_message_pool_phy));
+
+	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
+	if (create_mfi_frame_pool(instance)) {
+		cmn_err(CE_WARN,
+		    "Error, allocating memory for MFI frame-pool");
+		goto mpi2_undo_descriptor_pool;
+	}
+
+	/* Allocate MPI2 Message pool */
+	/*
+	 * Make sure the buffer is aligned to 256 for the raid message packet;
+	 * create an io request pool and assign one frame to each cmd.
+	 */
+	if (create_mpi2_frame_pool(instance)) {
+		cmn_err(CE_WARN,
+		    "Error, allocating memory for MPI2 Message-pool");
+		goto mpi2_undo_mfi_frame_pool;
+	}
+
+#ifdef DEBUG
+	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
+	    instance->max_sge_in_main_msg));
+	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
+	    instance->max_sge_in_chain));
+	con_log(CL_ANN1, (CE_CONT,
+	    "[max_sge]0x%x", instance->max_num_sge));
+	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
+	    instance->chain_offset_mpt_msg));
+	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
+	    instance->chain_offset_io_req));
+#endif
+
+	/* Allocate additional dma buffer */
+	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
+		cmn_err(CE_WARN,
+		    "Error, allocating tbolt additional DMA buffer");
+		goto mpi2_undo_message_pool;
+	}
+
+	return (DDI_SUCCESS);
+
+mpi2_undo_message_pool:
+	destroy_mpi2_frame_pool(instance);
+
+mpi2_undo_mfi_frame_pool:
+	destroy_mfi_frame_pool(instance);
+
+mpi2_undo_descriptor_pool:
+	free_req_rep_desc_pool(instance);
+
+mpi2_undo_cmd_pool:
+	mrsas_free_cmd_pool(instance);
+
+	return (DDI_FAILURE);
+}
+
+
+/*
+ * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
+ */
+int
+mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
+{
+	/*
+	 * Reduce the max supported cmds by 1.
This is to ensure that the + * reply_q_sz (1 more than the max cmd that driver may send) + * does not exceed max cmds that the FW can support + */ + + if (instance->max_fw_cmds > 1008) { + instance->max_fw_cmds = 1008; + instance->max_fw_cmds = instance->max_fw_cmds-1; + } + + con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: " + " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds)); + + + /* create a pool of commands */ + if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) { + cmn_err(CE_WARN, + " alloc_space_for_mpi2() failed."); + + return (DDI_FAILURE); + } + + /* Send ioc init message */ + /* NOTE: the issue_init call does FMA checking already. */ + if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) { + cmn_err(CE_WARN, + " mrsas_issue_init_mpi2() failed."); + + goto fail_init_fusion; + } + + instance->unroll.alloc_space_mpi2 = 1; + + con_log(CL_ANN, (CE_NOTE, + "mrsas_init_adapter_tbolt: SUCCESSFUL")); + + return (DDI_SUCCESS); + +fail_init_fusion: + free_space_for_mpi2(instance); + + return (DDI_FAILURE); +} + + + +/* + * init_mpi2 + */ +int +mrsas_issue_init_mpi2(struct mrsas_instance *instance) +{ + dma_obj_t init2_dma_obj; + int ret_val = DDI_SUCCESS; + + /* allocate DMA buffer for IOC INIT message */ + init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t); + init2_dma_obj.dma_attr = mrsas_generic_dma_attr; + init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; + init2_dma_obj.dma_attr.dma_attr_sgllen = 1; + init2_dma_obj.dma_attr.dma_attr_align = 256; + + if (mrsas_alloc_dma_obj(instance, &init2_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 " + "could not allocate data transfer buffer."); + return (DDI_FAILURE); + } + (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t)); + + con_log(CL_ANN1, (CE_NOTE, + "mrsas_issue_init_mpi2 _phys adr: %x", + init2_dma_obj.dma_cookie[0].dmac_address)); + + + /* Initialize and send ioc init message */ + ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj); + if (ret_val == DDI_FAILURE) { + con_log(CL_ANN1, (CE_WARN, + "mrsas_issue_init_mpi2: Failed")); + goto fail_init_mpi2; + } + + /* free IOC init DMA buffer */ + if (mrsas_free_dma_obj(instance, init2_dma_obj) + != DDI_SUCCESS) { + con_log(CL_ANN1, (CE_WARN, + "mrsas_issue_init_mpi2: Free Failed")); + return (DDI_FAILURE); + } + + /* Get/Check and sync ld_map info */ + instance->map_id = 0; + if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS) + (void) mrsas_tbolt_sync_map_info(instance); + + + /* No mrsas_cmd to send, so send NULL. 
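+	 * With no command attached, mrsas_common_check() presumably
+	 * audits only the instance-wide register and DMA access handles
+	 * for FMA faults.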
*/ + if (mrsas_common_check(instance, NULL) != DDI_SUCCESS) + goto fail_init_mpi2; + + con_log(CL_ANN, (CE_NOTE, + "mrsas_issue_init_mpi2: SUCCESSFUL")); + + return (DDI_SUCCESS); + +fail_init_mpi2: + (void) mrsas_free_dma_obj(instance, init2_dma_obj); + + return (DDI_FAILURE); +} + +static int +mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj) +{ + int numbytes; + uint16_t flags; + struct mrsas_init_frame2 *mfiFrameInit2; + struct mrsas_header *frame_hdr; + Mpi2IOCInitRequest_t *init; + struct mrsas_cmd *cmd = NULL; + struct mrsas_drv_ver drv_ver_info; + MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; + + con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + + +#ifdef DEBUG + con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n", + (int)sizeof (*mfiFrameInit2))); + con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init))); + con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n", + (int)sizeof (struct mrsas_init_frame2))); + con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", + (int)sizeof (Mpi2IOCInitRequest_t))); +#endif + + init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer; + numbytes = sizeof (*init); + bzero(init, numbytes); + + ddi_put8(mpi2_dma_obj->acc_handle, &init->Function, + MPI2_FUNCTION_IOC_INIT); + + ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit, + MPI2_WHOINIT_HOST_DRIVER); + + /* set MsgVersion and HeaderVersion host driver was built with */ + ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion, + MPI2_VERSION); + + ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion, + MPI2_HEADER_VERSION); + + ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize, + instance->raid_io_msg_size / 4); + + ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth, + 0); + + ddi_put16(mpi2_dma_obj->acc_handle, + &init->ReplyDescriptorPostQueueDepth, + instance->reply_q_depth); + /* + * These addresses are set using the DMA cookie addresses from when the + * memory was allocated. Sense buffer hi address should be 0. 
+ * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0); + */ + + ddi_put32(mpi2_dma_obj->acc_handle, + &init->SenseBufferAddressHigh, 0); + + ddi_put64(mpi2_dma_obj->acc_handle, + (uint64_t *)&init->SystemRequestFrameBaseAddress, + instance->io_request_frames_phy); + + ddi_put64(mpi2_dma_obj->acc_handle, + &init->ReplyDescriptorPostQueueAddress, + instance->reply_frame_pool_phy); + + ddi_put64(mpi2_dma_obj->acc_handle, + &init->ReplyFreeQueueAddress, 0); + + cmd = instance->cmd_list[0]; + if (cmd == NULL) { + return (DDI_FAILURE); + } + cmd->retry_count_for_ocr = 0; + cmd->pkt = NULL; + cmd->drv_pkt_time = 0; + + mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request; + con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2)); + + frame_hdr = &cmd->frame->hdr; + + ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, + MFI_CMD_STATUS_POLL_MODE); + + flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); + + flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + + ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); + + con_log(CL_ANN, (CE_CONT, + "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID)); + + /* Init the MFI Header */ + ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, + &mfiFrameInit2->cmd, MFI_CMD_OP_INIT); + + con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd)); + + ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, + &mfiFrameInit2->cmd_status, + MFI_STAT_INVALID_STATUS); + + con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status)); + + ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle, + &mfiFrameInit2->queue_info_new_phys_addr_lo, + mpi2_dma_obj->dma_cookie[0].dmac_address); + + ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle, + &mfiFrameInit2->data_xfer_len, + sizeof (Mpi2IOCInitRequest_t)); + + con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x", + (int)init->ReplyDescriptorPostQueueAddress)); + + /* fill driver version information */ + fill_up_drv_ver(&drv_ver_info); + + /* allocate the driver version data transfer buffer */ + instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver); + instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr; + instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; + instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; + instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1; + instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1; + + if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj, + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { + cmn_err(CE_WARN, + "fusion init: Could not allocate driver version buffer."); + return (DDI_FAILURE); + } + /* copy driver version to dma buffer */ + bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver)); + ddi_rep_put8(cmd->frame_dma_obj.acc_handle, + (uint8_t *)drv_ver_info.drv_ver, + (uint8_t *)instance->drv_ver_dma_obj.buffer, + sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR); + + /* send driver version physical address to firmware */ + ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion, + instance->drv_ver_dma_obj.dma_cookie[0].dmac_address); + + con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x", + mfiFrameInit2->queue_info_new_phys_addr_lo, + (int)sizeof (Mpi2IOCInitRequest_t))); + + con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len)); + + con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x", + cmd->scsi_io_request_phys_addr, + (int)sizeof (struct mrsas_init_frame2))); + + /* disable 
interrupts before sending INIT2 frame */ + instance->func_ptr->disable_intr(instance); + + req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *) + instance->request_message_pool; + req_desc->Words = cmd->scsi_io_request_phys_addr; + req_desc->MFAIo.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + cmd->request_desc = req_desc; + + /* issue the init frame */ + instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd); + + con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd)); + con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ", + frame_hdr->cmd_status)); + + if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, + &mfiFrameInit2->cmd_status) == 0) { + con_log(CL_ANN, (CE_NOTE, "INIT2 Success")); + } else { + con_log(CL_ANN, (CE_WARN, "INIT2 Fail")); + mrsas_dump_reply_desc(instance); + goto fail_ioc_init; + } + + mrsas_dump_reply_desc(instance); + + instance->unroll.verBuff = 1; + + con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL")); + + return (DDI_SUCCESS); + + +fail_ioc_init: + + (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj); + + return (DDI_FAILURE); +} + +int +wait_for_outstanding_poll_io(struct mrsas_instance *instance) +{ + int i; + uint32_t wait_time = dump_io_wait_time; + for (i = 0; i < wait_time; i++) { + /* + * Check For Outstanding poll Commands + * except ldsync command and aen command + */ + if (instance->fw_outstanding <= 2) { + break; + } + drv_usecwait(10*MILLISEC); + /* complete commands from reply queue */ + (void) mr_sas_tbolt_process_outstanding_cmd(instance); + } + if (instance->fw_outstanding > 2) { + return (1); + } + return (0); +} +/* + * scsi_pkt handling + * + * Visible to the external world via the transport structure. + */ + +int +mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) +{ + struct mrsas_instance *instance = ADDR2MR(ap); + struct scsa_cmd *acmd = PKT2CMD(pkt); + struct mrsas_cmd *cmd = NULL; + uchar_t cmd_done = 0; + + con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); + if (instance->deadadapter == 1) { + cmn_err(CE_WARN, + "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR " + "for IO, as the HBA doesnt take any more IOs"); + if (pkt) { + pkt->pkt_reason = CMD_DEV_GONE; + pkt->pkt_statistics = STAT_DISCON; + } + return (TRAN_FATAL_ERROR); + } + if (instance->adapterresetinprogress) { + con_log(CL_ANN, (CE_NOTE, "Reset flag set, " + "returning mfi_pkt and setting TRAN_BUSY\n")); + return (TRAN_BUSY); + } + (void) mrsas_tbolt_prepare_pkt(acmd); + + cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done); + + /* + * Check if the command is already completed by the mrsas_build_cmd() + * routine. In which case the busy_flag would be clear and scb will be + * NULL and appropriate reason provided in pkt_reason field + */ + if (cmd_done) { + pkt->pkt_reason = CMD_CMPLT; + pkt->pkt_scbp[0] = STATUS_GOOD; + pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET + | STATE_SENT_CMD; + if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) { + (*pkt->pkt_comp)(pkt); + } + + return (TRAN_ACCEPT); + } + + if (cmd == NULL) { + return (TRAN_BUSY); + } + + + if ((pkt->pkt_flags & FLAG_NOINTR) == 0) { + if (instance->fw_outstanding > instance->max_fw_cmds) { + cmn_err(CE_WARN, + "Command Queue Full... 
Returning BUSY"); + return_raid_msg_pkt(instance, cmd); + return (TRAN_BUSY); + } + + /* Synchronize the Cmd frame for the controller */ + (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0, + DDI_DMA_SYNC_FORDEV); + + con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x " + "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0], + cmd->index, cmd->SMID)); + + instance->func_ptr->issue_cmd(cmd, instance); + } else { + instance->func_ptr->issue_cmd(cmd, instance); + (void) wait_for_outstanding_poll_io(instance); + (void) mrsas_common_check(instance, cmd); + } + + return (TRAN_ACCEPT); +} + +/* + * prepare the pkt: + * the pkt may have been resubmitted or just reused so + * initialize some fields and do some checks. + */ +static int +mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd) +{ + struct scsi_pkt *pkt = CMD2PKT(acmd); + + + /* + * Reinitialize some fields that need it; the packet may + * have been resubmitted + */ + pkt->pkt_reason = CMD_CMPLT; + pkt->pkt_state = 0; + pkt->pkt_statistics = 0; + pkt->pkt_resid = 0; + + /* + * zero status byte. + */ + *(pkt->pkt_scbp) = 0; + + return (0); +} + + +int +mr_sas_tbolt_build_sgl(struct mrsas_instance *instance, + struct scsa_cmd *acmd, + struct mrsas_cmd *cmd, + Mpi2RaidSCSIIORequest_t *scsi_raid_io, + uint32_t *datalen) +{ + uint32_t MaxSGEs; + int sg_to_process; + uint32_t i, j; + uint32_t numElements, endElement; + Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL; + Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL; + ddi_acc_handle_t acc_handle = + instance->mpi2_frame_pool_dma_obj.acc_handle; + + con_log(CL_ANN1, (CE_NOTE, + "chkpnt: Building Chained SGL :%d", __LINE__)); + + /* Calulate SGE size in number of Words(32bit) */ + /* Clear the datalen before updating it. */ + *datalen = 0; + + MaxSGEs = instance->max_sge_in_main_msg; + + ddi_put16(acc_handle, &scsi_raid_io->SGLFlags, + MPI2_SGE_FLAGS_64_BIT_ADDRESSING); + + /* set data transfer flag. */ + if (acmd->cmd_flags & CFLAG_DMASEND) { + ddi_put32(acc_handle, &scsi_raid_io->Control, + MPI2_SCSIIO_CONTROL_WRITE); + } else { + ddi_put32(acc_handle, &scsi_raid_io->Control, + MPI2_SCSIIO_CONTROL_READ); + } + + + numElements = acmd->cmd_cookiecnt; + + con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements)); + + if (numElements > instance->max_num_sge) { + con_log(CL_ANN, (CE_NOTE, + "[Max SGE Count Exceeded]:%x", numElements)); + return (numElements); + } + + ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE, + (uint8_t)numElements); + + /* set end element in main message frame */ + endElement = (numElements <= MaxSGEs) ? 
numElements : (MaxSGEs - 1); + + /* prepare the scatter-gather list for the firmware */ + scsi_raid_io_sgl_ieee = + (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain; + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee; + sgl_ptr_end += instance->max_sge_in_main_msg - 1; + + ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0); + } + + for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) { + ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address, + acmd->cmd_dmacookies[i].dmac_laddress); + + ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, + acmd->cmd_dmacookies[i].dmac_size); + + ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0); + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + if (i == (numElements - 1)) { + ddi_put8(acc_handle, + &scsi_raid_io_sgl_ieee->Flags, + IEEE_SGE_FLAGS_END_OF_LIST); + } + } + + *datalen += acmd->cmd_dmacookies[i].dmac_size; + +#ifdef DEBUG + con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64, + scsi_raid_io_sgl_ieee->Address)); + con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x", + scsi_raid_io_sgl_ieee->Length)); + con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x", + scsi_raid_io_sgl_ieee->Flags)); +#endif + + } + + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0); + + /* check if chained SGL required */ + if (i < numElements) { + + con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i)); + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + uint16_t ioFlags = + ddi_get16(acc_handle, &scsi_raid_io->IoFlags); + + if ((ioFlags & + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) { + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, + (U8)instance->chain_offset_io_req); + } else { + ddi_put8(acc_handle, + &scsi_raid_io->ChainOffset, 0); + } + } else { + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, + (U8)instance->chain_offset_io_req); + } + + /* prepare physical chain element */ + ieeeChainElement = scsi_raid_io_sgl_ieee; + + ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0); + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + ddi_put8(acc_handle, &ieeeChainElement->Flags, + IEEE_SGE_FLAGS_CHAIN_ELEMENT); + } else { + ddi_put8(acc_handle, &ieeeChainElement->Flags, + (IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR)); + } + + ddi_put32(acc_handle, &ieeeChainElement->Length, + (sizeof (MPI2_SGE_IO_UNION) * (numElements - i))); + + ddi_put64(acc_handle, &ieeeChainElement->Address, + (U64)cmd->sgl_phys_addr); + + sg_to_process = numElements - i; + + con_log(CL_ANN1, (CE_NOTE, + "[Additional SGE Count]:%x", endElement)); + + /* point to the chained SGL buffer */ + scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl; + + /* build rest of the SGL in chained buffer */ + for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) { + con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i)); + + ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address, + acmd->cmd_dmacookies[i].dmac_laddress); + + ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, + acmd->cmd_dmacookies[i].dmac_size); + + ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0); + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + if (i == (numElements - 1)) { + ddi_put8(acc_handle, + &scsi_raid_io_sgl_ieee->Flags, + IEEE_SGE_FLAGS_END_OF_LIST); + } + } + + *datalen += acmd->cmd_dmacookies[i].dmac_size; + +#if DEBUG + con_log(CL_DLEVEL1, (CE_NOTE, + "[SGL Address]: %" PRIx64, + 
scsi_raid_io_sgl_ieee->Address)); + con_log(CL_DLEVEL1, (CE_NOTE, + "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length)); + con_log(CL_DLEVEL1, (CE_NOTE, + "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags)); +#endif + + i++; + } + } + + return (0); +} /*end of BuildScatterGather */ + + +/* + * build_cmd + */ +static struct mrsas_cmd * +mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap, + struct scsi_pkt *pkt, uchar_t *cmd_done) +{ + uint8_t fp_possible = 0; + uint32_t index; + uint32_t lba_count = 0; + uint32_t start_lba_hi = 0; + uint32_t start_lba_lo = 0; + ddi_acc_handle_t acc_handle = + instance->mpi2_frame_pool_dma_obj.acc_handle; + struct mrsas_cmd *cmd = NULL; + struct scsa_cmd *acmd = PKT2CMD(pkt); + MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion; + Mpi2RaidSCSIIORequest_t *scsi_raid_io; + uint32_t datalen; + struct IO_REQUEST_INFO io_info; + MR_FW_RAID_MAP_ALL *local_map_ptr; + uint16_t pd_cmd_cdblen; + + con_log(CL_DLEVEL1, (CE_NOTE, + "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__)); + + /* find out if this is logical or physical drive command. */ + acmd->islogical = MRDRV_IS_LOGICAL(ap); + acmd->device_id = MAP_DEVICE_ID(instance, ap); + + *cmd_done = 0; + + /* get the command packet */ + if (!(cmd = get_raid_msg_pkt(instance))) { + return (NULL); + } + + index = cmd->index; + ReqDescUnion = mr_sas_get_request_descriptor(instance, index); + ReqDescUnion->Words = 0; + ReqDescUnion->SCSIIO.SMID = cmd->SMID; + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_LD_IO << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + + cmd->request_desc = ReqDescUnion; + cmd->pkt = pkt; + cmd->cmd = acmd; + + /* lets get the command directions */ + if (acmd->cmd_flags & CFLAG_DMASEND) { + if (acmd->cmd_flags & CFLAG_CONSISTENT) { + (void) ddi_dma_sync(acmd->cmd_dmahandle, + acmd->cmd_dma_offset, acmd->cmd_dma_len, + DDI_DMA_SYNC_FORDEV); + } + } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { + if (acmd->cmd_flags & CFLAG_CONSISTENT) { + (void) ddi_dma_sync(acmd->cmd_dmahandle, + acmd->cmd_dma_offset, acmd->cmd_dma_len, + DDI_DMA_SYNC_FORCPU); + } + } else { + con_log(CL_ANN, (CE_NOTE, "NO DMA")); + } + + + /* get SCSI_IO raid message frame pointer */ + scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request; + + /* zero out SCSI_IO raid message frame */ + bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t)); + + /* Set the ldTargetId set by BuildRaidContext() */ + ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId, + acmd->device_id); + + /* Copy CDB to scsi_io_request message frame */ + ddi_rep_put8(acc_handle, + (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32, + acmd->cmd_cdblen, DDI_DEV_AUTOINCR); + + /* + * Just the CDB length, rest of the Flags are zero + * This will be modified later. 
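+	 * (decode example for the switch below: a READ(10) CDB of
+	 * 28 00 00 12 d6 87 00 00 08 00 yields start_lba_lo = 0x0012d687
+	 * and lba_count = 8)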
+ */ + ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen); + + pd_cmd_cdblen = acmd->cmd_cdblen; + + switch (pkt->pkt_cdbp[0]) { + case SCMD_READ: + case SCMD_WRITE: + case SCMD_READ_G1: + case SCMD_WRITE_G1: + case SCMD_READ_G4: + case SCMD_WRITE_G4: + case SCMD_READ_G5: + case SCMD_WRITE_G5: + + if (acmd->islogical) { + /* Initialize sense Information */ + if (cmd->sense1 == NULL) { + con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: " + "Sense buffer ptr NULL ")); + } + bzero(cmd->sense1, SENSE_LENGTH); + con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd " + "CDB[0] = %x\n", pkt->pkt_cdbp[0])); + + if (acmd->cmd_cdblen == CDB_GROUP0) { + /* 6-byte cdb */ + lba_count = (uint16_t)(pkt->pkt_cdbp[4]); + start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) | + ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | + ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) + << 16)); + } else if (acmd->cmd_cdblen == CDB_GROUP1) { + /* 10-byte cdb */ + lba_count = + (((uint16_t)(pkt->pkt_cdbp[8])) | + ((uint16_t)(pkt->pkt_cdbp[7]) << 8)); + + start_lba_lo = + (((uint32_t)(pkt->pkt_cdbp[5])) | + ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); + + } else if (acmd->cmd_cdblen == CDB_GROUP5) { + /* 12-byte cdb */ + lba_count = ( + ((uint32_t)(pkt->pkt_cdbp[9])) | + ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[6]) << 24)); + + start_lba_lo = + (((uint32_t)(pkt->pkt_cdbp[5])) | + ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); + + } else if (acmd->cmd_cdblen == CDB_GROUP4) { + /* 16-byte cdb */ + lba_count = ( + ((uint32_t)(pkt->pkt_cdbp[13])) | + ((uint32_t)(pkt->pkt_cdbp[12]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[11]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[10]) << 24)); + + start_lba_lo = ( + ((uint32_t)(pkt->pkt_cdbp[9])) | + ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[6]) << 24)); + + start_lba_hi = ( + ((uint32_t)(pkt->pkt_cdbp[5])) | + ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | + ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | + ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); + } + + if (instance->tbolt && + ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) { + cmn_err(CE_WARN, " IO SECTOR COUNT exceeds " + "controller limit 0x%x sectors", + lba_count); + } + + bzero(&io_info, sizeof (struct IO_REQUEST_INFO)); + io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | + start_lba_lo; + io_info.numBlocks = lba_count; + io_info.ldTgtId = acmd->device_id; + + if (acmd->cmd_flags & CFLAG_DMASEND) + io_info.isRead = 0; + else + io_info.isRead = 1; + + + /* Acquire SYNC MAP UPDATE lock */ + mutex_enter(&instance->sync_map_mtx); + + local_map_ptr = + instance->ld_map[(instance->map_id & 1)]; + + if ((MR_TargetIdToLdGet( + acmd->device_id, local_map_ptr) >= + MAX_LOGICAL_DRIVES) || !instance->fast_path_io) { + cmn_err(CE_NOTE, "Fast Path NOT Possible, " + "targetId >= MAX_LOGICAL_DRIVES || " + "!instance->fast_path_io"); + fp_possible = 0; + /* Set Regionlock flags to BYPASS */ + /* io_request->RaidContext.regLockFlags = 0; */ + ddi_put8(acc_handle, + &scsi_raid_io->RaidContext.regLockFlags, 0); + } else { + if (MR_BuildRaidContext(instance, &io_info, + &scsi_raid_io->RaidContext, local_map_ptr)) + fp_possible = io_info.fpOkForIo; + } + + if (!enable_fp) + fp_possible = 0; + + con_log(CL_ANN1, (CE_NOTE, "enable_fp %d " + "instance->fast_path_io %d fp_possible %d", + enable_fp, instance->fast_path_io, 
fp_possible)); + + if (fp_possible) { + + /* Check for DIF enabled LD */ + if (MR_CheckDIF(acmd->device_id, local_map_ptr)) { + /* Prepare 32 Byte CDB for DIF capable Disk */ + mrsas_tbolt_prepare_cdb(instance, + scsi_raid_io->CDB.CDB32, + &io_info, scsi_raid_io, start_lba_lo); + } else { + mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32, + (uint8_t *)&pd_cmd_cdblen, + io_info.pdBlock, io_info.numBlocks); + ddi_put16(acc_handle, + &scsi_raid_io->IoFlags, pd_cmd_cdblen); + } + + ddi_put8(acc_handle, &scsi_raid_io->Function, + MPI2_FUNCTION_SCSI_IO_REQUEST); + + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + uint8_t regLockFlags = ddi_get8(acc_handle, + &scsi_raid_io->RaidContext.regLockFlags); + uint16_t IoFlags = ddi_get16(acc_handle, + &scsi_raid_io->IoFlags); + + if (regLockFlags == REGION_TYPE_UNUSED) + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + IoFlags |= + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + regLockFlags |= + (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | + MR_RL_FLAGS_SEQ_NUM_ENABLE); + + ddi_put8(acc_handle, + &scsi_raid_io->ChainOffset, 0); + ddi_put8(acc_handle, + &scsi_raid_io->RaidContext.nsegType, + ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | + MPI2_TYPE_CUDA)); + ddi_put8(acc_handle, + &scsi_raid_io->RaidContext.regLockFlags, + regLockFlags); + ddi_put16(acc_handle, + &scsi_raid_io->IoFlags, IoFlags); + } + + if ((instance->load_balance_info[ + acmd->device_id].loadBalanceFlag) && + (io_info.isRead)) { + io_info.devHandle = + get_updated_dev_handle(&instance-> + load_balance_info[acmd->device_id], + &io_info); + cmd->load_balance_flag |= + MEGASAS_LOAD_BALANCE_FLAG; + } else { + cmd->load_balance_flag &= + ~MEGASAS_LOAD_BALANCE_FLAG; + } + + ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle; + ddi_put16(acc_handle, &scsi_raid_io->DevHandle, + io_info.devHandle); + + } else { + ddi_put8(acc_handle, &scsi_raid_io->Function, + MPI2_FUNCTION_LD_IO_REQUEST); + + ddi_put16(acc_handle, + &scsi_raid_io->DevHandle, acmd->device_id); + + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_LD_IO << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + ddi_put16(acc_handle, + &scsi_raid_io->RaidContext.timeoutValue, + local_map_ptr->raidMap.fpPdIoTimeoutSec); + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + uint8_t regLockFlags = ddi_get8(acc_handle, + &scsi_raid_io->RaidContext.regLockFlags); + + if (regLockFlags == REGION_TYPE_UNUSED) { + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + } + + regLockFlags |= + (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | + MR_RL_FLAGS_SEQ_NUM_ENABLE); + + ddi_put8(acc_handle, + &scsi_raid_io->RaidContext.nsegType, + ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | + MPI2_TYPE_CUDA)); + ddi_put8(acc_handle, + &scsi_raid_io->RaidContext.regLockFlags, + regLockFlags); + } + } /* Not FP */ + + /* Release SYNC MAP UPDATE lock */ + mutex_exit(&instance->sync_map_mtx); + + + /* + * Set sense buffer physical address/length in scsi_io_request. 
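+		 * Only the low 32 bits are programmed because the MPI2
+		 * frame pool that sense_phys_addr1 points into was
+		 * constrained below 4GB (dma_attr_addr_hi = 0xFFFFFFFFU)
+		 * in create_mpi2_frame_pool().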
+ */ + ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress, + cmd->sense_phys_addr1); + ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, + SENSE_LENGTH); + + /* Construct SGL */ + ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0, + offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4); + + (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd, + scsi_raid_io, &datalen); + + ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen); + + break; +#ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */ + } else { + break; +#endif + } + /* fall through For all non-rd/wr cmds */ + default: + switch (pkt->pkt_cdbp[0]) { + case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */ + return_raid_msg_pkt(instance, cmd); + *cmd_done = 1; + return (NULL); + } + + case SCMD_MODE_SENSE: + case SCMD_MODE_SENSE_G1: { + union scsi_cdb *cdbp; + uint16_t page_code; + + cdbp = (void *)pkt->pkt_cdbp; + page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; + switch (page_code) { + case 0x3: + case 0x4: + (void) mrsas_mode_sense_build(pkt); + return_raid_msg_pkt(instance, cmd); + *cmd_done = 1; + return (NULL); + } + break; + } + + default: { + /* + * Here we need to handle PASSTHRU for + * Logical Devices. Like Inquiry etc. + */ + + if (!(acmd->islogical)) { + + /* Acquire SYNC MAP UPDATE lock */ + mutex_enter(&instance->sync_map_mtx); + + local_map_ptr = + instance->ld_map[(instance->map_id & 1)]; + + ddi_put8(acc_handle, &scsi_raid_io->Function, + MPI2_FUNCTION_SCSI_IO_REQUEST); + + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + ddi_put16(acc_handle, &scsi_raid_io->DevHandle, + local_map_ptr->raidMap. + devHndlInfo[acmd->device_id].curDevHdl); + + + /* Set regLockFlasgs to REGION_TYPE_BYPASS */ + ddi_put8(acc_handle, + &scsi_raid_io->RaidContext.regLockFlags, 0); + ddi_put64(acc_handle, + &scsi_raid_io->RaidContext.regLockRowLBA, + 0); + ddi_put32(acc_handle, + &scsi_raid_io->RaidContext.regLockLength, + 0); + ddi_put8(acc_handle, + &scsi_raid_io->RaidContext.RAIDFlags, + MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << + MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + ddi_put16(acc_handle, + &scsi_raid_io->RaidContext.timeoutValue, + local_map_ptr->raidMap.fpPdIoTimeoutSec); + ddi_put16(acc_handle, + &scsi_raid_io->RaidContext.ldTargetId, + acmd->device_id); + ddi_put8(acc_handle, + &scsi_raid_io->LUN[1], acmd->lun); + + /* Release SYNC MAP UPDATE lock */ + mutex_exit(&instance->sync_map_mtx); + + } else { + ddi_put8(acc_handle, &scsi_raid_io->Function, + MPI2_FUNCTION_LD_IO_REQUEST); + ddi_put8(acc_handle, + &scsi_raid_io->LUN[1], acmd->lun); + ddi_put16(acc_handle, + &scsi_raid_io->DevHandle, acmd->device_id); + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + } + + /* + * Set sense buffer physical address/length in + * scsi_io_request. 
+ */ + ddi_put32(acc_handle, + &scsi_raid_io->SenseBufferLowAddress, + cmd->sense_phys_addr1); + ddi_put8(acc_handle, + &scsi_raid_io->SenseBufferLength, SENSE_LENGTH); + + /* Construct SGL */ + ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0, + offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4); + + (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd, + scsi_raid_io, &datalen); + + ddi_put32(acc_handle, + &scsi_raid_io->DataLength, datalen); + + + con_log(CL_ANN, (CE_CONT, + "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n", + pkt->pkt_cdbp[0], acmd->device_id)); + con_log(CL_DLEVEL1, (CE_CONT, + "data length = %x\n", + scsi_raid_io->DataLength)); + con_log(CL_DLEVEL1, (CE_CONT, + "cdb length = %x\n", + acmd->cmd_cdblen)); + } + break; + } + + } + + return (cmd); +} + +/* + * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure + * @ap: + * @pkt: + * @bp: + * @cmdlen: + * @statuslen: + * @tgtlen: + * @flags: + * @callback: + * + * The tran_init_pkt() entry point allocates and initializes a scsi_pkt + * structure and DMA resources for a target driver request. The + * tran_init_pkt() entry point is called when the target driver calls the + * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point + * is a request to perform one or more of three possible services: + * - allocation and initialization of a scsi_pkt structure + * - allocation of DMA resources for data transfer + * - reallocation of DMA resources for the next portion of the data transfer + */ +struct scsi_pkt * +mrsas_tbolt_tran_init_pkt(struct scsi_address *ap, + register struct scsi_pkt *pkt, + struct buf *bp, int cmdlen, int statuslen, int tgtlen, + int flags, int (*callback)(), caddr_t arg) +{ + struct scsa_cmd *acmd; + struct mrsas_instance *instance; + struct scsi_pkt *new_pkt; + + instance = ADDR2MR(ap); + + /* step #1 : pkt allocation */ + if (pkt == NULL) { + pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen, + tgtlen, sizeof (struct scsa_cmd), callback, arg); + if (pkt == NULL) { + return (NULL); + } + + acmd = PKT2CMD(pkt); + + /* + * Initialize the new pkt - we redundantly initialize + * all the fields for illustrative purposes. 
+ */
+	acmd->cmd_pkt = pkt;
+	acmd->cmd_flags = 0;
+	acmd->cmd_scblen = statuslen;
+	acmd->cmd_cdblen = cmdlen;
+	acmd->cmd_dmahandle = NULL;
+	acmd->cmd_ncookies = 0;
+	acmd->cmd_cookie = 0;
+	acmd->cmd_cookiecnt = 0;
+	acmd->cmd_nwin = 0;
+
+	pkt->pkt_address = *ap;
+	pkt->pkt_comp = (void (*)())NULL;
+	pkt->pkt_flags = 0;
+	pkt->pkt_time = 0;
+	pkt->pkt_resid = 0;
+	pkt->pkt_state = 0;
+	pkt->pkt_statistics = 0;
+	pkt->pkt_reason = 0;
+	new_pkt = pkt;
+	} else {
+		acmd = PKT2CMD(pkt);
+		new_pkt = NULL;
+	}
+
+	/* step #2 : dma allocation/move */
+	if (bp && bp->b_bcount != 0) {
+		if (acmd->cmd_dmahandle == NULL) {
+			if (mrsas_dma_alloc(instance, pkt, bp, flags,
+			    callback) == DDI_FAILURE) {
+				if (new_pkt) {
+					scsi_hba_pkt_free(ap, new_pkt);
+				}
+				return ((struct scsi_pkt *)NULL);
+			}
+		} else {
+			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
+				return ((struct scsi_pkt *)NULL);
+			}
+		}
+	}
+	return (pkt);
+}
+
+
+uint32_t
+tbolt_read_fw_status_reg(struct mrsas_instance *instance)
+{
+	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
+}
+
+void
+tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
+{
+	struct scsi_pkt *pkt;
+	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
+
+	atomic_add_16(&instance->fw_outstanding, 1);
+
+	con_log(CL_ANN1,
+	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
+
+	con_log(CL_DLEVEL1, (CE_CONT,
+	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
+	con_log(CL_DLEVEL1, (CE_CONT,
+	    " [req desc low part] %x \n",
+	    (uint_t)(req_desc->Words & 0xffffffff)));
+	con_log(CL_DLEVEL1, (CE_CONT,
+	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
+	pkt = cmd->pkt;
+
+	if (pkt) {
+		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
+		    "ISSUED CMD TO FW : called : cmd:"
+		    ": %p instance : %p pkt : %p pkt_time : %x\n",
+		    gethrtime(), (void *)cmd, (void *)instance,
+		    (void *)pkt, cmd->drv_pkt_time));
+		if (instance->adapterresetinprogress) {
+			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
+			con_log(CL_ANN, (CE_NOTE,
+			    "TBOLT Reset the scsi_pkt timer"));
+		} else {
+			push_pending_mfi_pkt(instance, cmd);
+		}
+	} else {
+		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
+		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
+		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
+	}
+
+	/* Issue the command to the FW */
+	mutex_enter(&instance->reg_write_mtx);
+	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
+	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
+	mutex_exit(&instance->reg_write_mtx);
+}
+
+/*
+ * issue_cmd_in_sync_mode
+ */
+int
+tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
+    struct mrsas_cmd *cmd)
+{
+	int i;
+	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
+	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
+	struct mrsas_header *hdr;
+
+	hdr = (struct mrsas_header *)&cmd->frame->hdr;
+
+	con_log(CL_ANN,
+	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
+	    cmd->SMID));
+
+	if (instance->adapterresetinprogress) {
+		cmd->drv_pkt_time = ddi_get16(
+		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
+		if (cmd->drv_pkt_time < debug_timeout_g)
+			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
+		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: "
+		    "RESET-IN-PROGRESS, issue cmd & return."));
+
+		mutex_enter(&instance->reg_write_mtx);
+		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
+		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
+		mutex_exit(&instance->reg_write_mtx);
+
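+		/*
+		 * The 64-bit request descriptor is programmed as two
+		 * 32-bit MMIO writes; reg_write_mtx keeps the low/high
+		 * halves of concurrent submissions from interleaving at
+		 * the inbound queue port.
+		 */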
return (DDI_SUCCESS); + } else { + con_log(CL_ANN1, (CE_NOTE, + "tbolt_issue_cmd_in_sync_mode: pushing the pkt")); + push_pending_mfi_pkt(instance, cmd); + } + + con_log(CL_DLEVEL2, (CE_NOTE, + "HighQport offset :%p", + (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT))); + con_log(CL_DLEVEL2, (CE_NOTE, + "LowQport offset :%p", + (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT))); + + cmd->sync_cmd = MRSAS_TRUE; + cmd->cmd_status = ENODATA; + + + mutex_enter(&instance->reg_write_mtx); + WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance); + WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance); + mutex_exit(&instance->reg_write_mtx); + + con_log(CL_ANN1, (CE_NOTE, + " req desc high part %x", (uint_t)(req_desc->Words >> 32))); + con_log(CL_ANN1, (CE_NOTE, " req desc low part %x", + (uint_t)(req_desc->Words & 0xffffffff))); + + mutex_enter(&instance->int_cmd_mtx); + for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { + cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); + } + mutex_exit(&instance->int_cmd_mtx); + + + if (i < (msecs -1)) { + return (DDI_SUCCESS); + } else { + return (DDI_FAILURE); + } +} + +/* + * issue_cmd_in_poll_mode + */ +int +tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance, + struct mrsas_cmd *cmd) +{ + int i; + uint16_t flags; + uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC; + struct mrsas_header *frame_hdr; + + con_log(CL_ANN, + (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", + cmd->SMID)); + + MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc; + + frame_hdr = (struct mrsas_header *)&cmd->frame->hdr; + ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, + MFI_CMD_STATUS_POLL_MODE); + flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); + flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); + + con_log(CL_ANN1, (CE_NOTE, " req desc low part %x", + (uint_t)(req_desc->Words & 0xffffffff))); + con_log(CL_ANN1, (CE_NOTE, + " req desc high part %x", (uint_t)(req_desc->Words >> 32))); + + /* issue the frame using inbound queue port */ + mutex_enter(&instance->reg_write_mtx); + WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance); + WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance); + mutex_exit(&instance->reg_write_mtx); + + for (i = 0; i < msecs && ( + ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) + == MFI_CMD_STATUS_POLL_MODE); i++) { + /* wait for cmd_status to change from 0xFF */ + drv_usecwait(MILLISEC); /* wait for 1000 usecs */ + } + + if (ddi_get8(cmd->frame_dma_obj.acc_handle, + &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) { + con_log(CL_ANN1, (CE_NOTE, + " cmd failed %" PRIx64, (req_desc->Words))); + return (DDI_FAILURE); + } + + return (DDI_SUCCESS); +} + +void +tbolt_enable_intr(struct mrsas_instance *instance) +{ + /* TODO: For Thunderbolt/Invader also clear intr on enable */ + /* writel(~0, ®s->outbound_intr_status); */ + /* readl(®s->outbound_intr_status); */ + + WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance); + + /* dummy read to force PCI flush */ + (void) RD_OB_INTR_MASK(instance); + +} + +void +tbolt_disable_intr(struct mrsas_instance *instance) +{ + uint32_t mask = 0xFFFFFFFF; + + WR_OB_INTR_MASK(mask, instance); + + /* Dummy readl to force pci flush */ + + (void) RD_OB_INTR_MASK(instance); +} + + +int +tbolt_intr_ack(struct mrsas_instance *instance) +{ + uint32_t status; + + /* check if it is our interrupt */ + status = 
RD_OB_INTR_STATUS(instance); + con_log(CL_ANN1, (CE_NOTE, + "chkpnt: Entered tbolt_intr_ack status = %d", status)); + + if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) { + return (DDI_INTR_UNCLAIMED); + } + + if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { + ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); + return (DDI_INTR_UNCLAIMED); + } + + if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) { + /* clear the interrupt by writing back the same value */ + WR_OB_INTR_STATUS(status, instance); + /* dummy READ */ + (void) RD_OB_INTR_STATUS(instance); + } + return (DDI_INTR_CLAIMED); +} + +/* + * get_raid_msg_pkt : Get a command from the free pool + * After successful allocation, the caller of this routine + * must clear the frame buffer (memset to zero) before + * using the packet further. + * + * ***** Note ***** + * After clearing the frame buffer the context id of the + * frame buffer SHOULD be restored back. + */ + +struct mrsas_cmd * +get_raid_msg_pkt(struct mrsas_instance *instance) +{ + mlist_t *head = &instance->cmd_pool_list; + struct mrsas_cmd *cmd = NULL; + + mutex_enter(&instance->cmd_pool_mtx); + ASSERT(mutex_owned(&instance->cmd_pool_mtx)); + + + if (!mlist_empty(head)) { + cmd = mlist_entry(head->next, struct mrsas_cmd, list); + mlist_del_init(head->next); + } + if (cmd != NULL) { + cmd->pkt = NULL; + cmd->retry_count_for_ocr = 0; + cmd->drv_pkt_time = 0; + } + mutex_exit(&instance->cmd_pool_mtx); + + if (cmd != NULL) + bzero(cmd->scsi_io_request, + sizeof (Mpi2RaidSCSIIORequest_t)); + return (cmd); +} + +struct mrsas_cmd * +get_raid_msg_mfi_pkt(struct mrsas_instance *instance) +{ + mlist_t *head = &instance->cmd_app_pool_list; + struct mrsas_cmd *cmd = NULL; + + mutex_enter(&instance->cmd_app_pool_mtx); + ASSERT(mutex_owned(&instance->cmd_app_pool_mtx)); + + if (!mlist_empty(head)) { + cmd = mlist_entry(head->next, struct mrsas_cmd, list); + mlist_del_init(head->next); + } + if (cmd != NULL) { + cmd->retry_count_for_ocr = 0; + cmd->drv_pkt_time = 0; + cmd->pkt = NULL; + cmd->request_desc = NULL; + + } + + mutex_exit(&instance->cmd_app_pool_mtx); + + if (cmd != NULL) { + bzero(cmd->scsi_io_request, + sizeof (Mpi2RaidSCSIIORequest_t)); + } + + return (cmd); +} + +/* + * return_raid_msg_pkt : Return a cmd to free command pool + */ +void +return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) +{ + mutex_enter(&instance->cmd_pool_mtx); + ASSERT(mutex_owned(&instance->cmd_pool_mtx)); + + + mlist_add_tail(&cmd->list, &instance->cmd_pool_list); + + mutex_exit(&instance->cmd_pool_mtx); +} + +void +return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) +{ + mutex_enter(&instance->cmd_app_pool_mtx); + ASSERT(mutex_owned(&instance->cmd_app_pool_mtx)); + + mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list); + + mutex_exit(&instance->cmd_app_pool_mtx); +} + + +void +mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance, + struct mrsas_cmd *cmd) +{ + Mpi2RaidSCSIIORequest_t *scsi_raid_io; + Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee; + MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion; + uint32_t index; + ddi_acc_handle_t acc_handle = + instance->mpi2_frame_pool_dma_obj.acc_handle; + + if (!instance->tbolt) { + con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.")); + return; + } + + index = cmd->index; + + ReqDescUnion = mr_sas_get_request_descriptor(instance, index); + + if (!ReqDescUnion) { + con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]")); + return; + } + + con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", 
cmd->SMID)); + + ReqDescUnion->Words = 0; + + ReqDescUnion->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + + ReqDescUnion->SCSIIO.SMID = cmd->SMID; + + cmd->request_desc = ReqDescUnion; + + /* get raid message frame pointer */ + scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request; + + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) { + Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *) + &scsi_raid_io->SGL.IeeeChain; + sgl_ptr_end += instance->max_sge_in_main_msg - 1; + ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0); + } + + ddi_put8(acc_handle, &scsi_raid_io->Function, + MPI2_FUNCTION_PASSTHRU_IO_REQUEST); + + ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0, + offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4); + + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, + (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16); + + ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress, + cmd->sense_phys_addr1); + + + scsi_raid_io_sgl_ieee = + (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain; + + ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address, + (U64)cmd->frame_phys_addr); + + ddi_put8(acc_handle, + &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR)); + /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */ + ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024); + + con_log(CL_ANN1, (CE_NOTE, + "[MFI CMD PHY ADDRESS]:%" PRIx64, + scsi_raid_io_sgl_ieee->Address)); + con_log(CL_ANN1, (CE_NOTE, + "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length)); + con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x", + scsi_raid_io_sgl_ieee->Flags)); +} + + +void +tbolt_complete_cmd(struct mrsas_instance *instance, + struct mrsas_cmd *cmd) +{ + uint8_t status; + uint8_t extStatus; + uint8_t arm; + struct scsa_cmd *acmd; + struct scsi_pkt *pkt; + struct scsi_arq_status *arqstat; + Mpi2RaidSCSIIORequest_t *scsi_raid_io; + LD_LOAD_BALANCE_INFO *lbinfo; + ddi_acc_handle_t acc_handle = + instance->mpi2_frame_pool_dma_obj.acc_handle; + + scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request; + + status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status); + extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus); + + con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status)); + con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus)); + + if (status != MFI_STAT_OK) { + con_log(CL_ANN, (CE_WARN, + "IO Cmd Failed SMID %x", cmd->SMID)); + } else { + con_log(CL_ANN, (CE_NOTE, + "IO Cmd Success SMID %x", cmd->SMID)); + } + + /* regular commands */ + + switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) { + + case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */ + acmd = (struct scsa_cmd *)cmd->cmd; + lbinfo = &instance->load_balance_info[acmd->device_id]; + + if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) { + arm = lbinfo->raid1DevHandle[0] == + scsi_raid_io->DevHandle ? 0 : 1; + + lbinfo->scsi_pending_cmds[arm]--; + cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG; + } + con_log(CL_DLEVEL3, (CE_NOTE, + "FastPath IO Completion Success ")); + /* FALLTHRU */ + + case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. 
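Fast-path
+ * completions fall through from the case above and
+ * share this completion handling. 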
*/ + acmd = (struct scsa_cmd *)cmd->cmd; + pkt = (struct scsi_pkt *)CMD2PKT(acmd); + + if (acmd->cmd_flags & CFLAG_DMAVALID) { + if (acmd->cmd_flags & CFLAG_CONSISTENT) { + (void) ddi_dma_sync(acmd->cmd_dmahandle, + acmd->cmd_dma_offset, acmd->cmd_dma_len, + DDI_DMA_SYNC_FORCPU); + } + } + + pkt->pkt_reason = CMD_CMPLT; + pkt->pkt_statistics = 0; + pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | + STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS; + + con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: " + "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0], + ((acmd->islogical) ? "LD" : "PD"), + acmd->cmd_dmacount, cmd->SMID, status)); + + if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) { + struct scsi_inquiry *inq; + + if (acmd->cmd_dmacount != 0) { + bp_mapin(acmd->cmd_buf); + inq = (struct scsi_inquiry *) + acmd->cmd_buf->b_un.b_addr; + + /* don't expose physical drives to OS */ + if (acmd->islogical && + (status == MFI_STAT_OK)) { + display_scsi_inquiry((caddr_t)inq); +#ifdef PDSUPPORT + } else if ((status == MFI_STAT_OK) && + inq->inq_dtype == DTYPE_DIRECT) { + display_scsi_inquiry((caddr_t)inq); +#endif + } else { + /* for physical disk */ + status = MFI_STAT_DEVICE_NOT_FOUND; + } + } + } + + switch (status) { + case MFI_STAT_OK: + pkt->pkt_scbp[0] = STATUS_GOOD; + break; + case MFI_STAT_LD_CC_IN_PROGRESS: + case MFI_STAT_LD_RECON_IN_PROGRESS: + pkt->pkt_scbp[0] = STATUS_GOOD; + break; + case MFI_STAT_LD_INIT_IN_PROGRESS: + pkt->pkt_reason = CMD_TRAN_ERR; + break; + case MFI_STAT_SCSI_IO_FAILED: + cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed"); + pkt->pkt_reason = CMD_TRAN_ERR; + break; + case MFI_STAT_SCSI_DONE_WITH_ERROR: + con_log(CL_ANN, (CE_WARN, + "tbolt_complete_cmd: scsi_done with error")); + + pkt->pkt_reason = CMD_CMPLT; + ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1; + + if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) { + con_log(CL_ANN, + (CE_WARN, "TEST_UNIT_READY fail")); + } else { + pkt->pkt_state |= STATE_ARQ_DONE; + arqstat = (void *)(pkt->pkt_scbp); + arqstat->sts_rqpkt_reason = CMD_CMPLT; + arqstat->sts_rqpkt_resid = 0; + arqstat->sts_rqpkt_state |= + STATE_GOT_BUS | STATE_GOT_TARGET + | STATE_SENT_CMD + | STATE_XFERRED_DATA; + *(uint8_t *)&arqstat->sts_rqpkt_status = + STATUS_GOOD; + con_log(CL_ANN1, + (CE_NOTE, "Copying Sense data %x", + cmd->SMID)); + + ddi_rep_get8(acc_handle, + (uint8_t *)&(arqstat->sts_sensedata), + cmd->sense1, + sizeof (struct scsi_extended_sense), + DDI_DEV_AUTOINCR); + + } + break; + case MFI_STAT_LD_OFFLINE: + cmn_err(CE_WARN, + "tbolt_complete_cmd: ld offline " + "CDB[0]=0x%x targetId=0x%x devhandle=0x%x", + /* UNDO: */ + ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]), + + ddi_get16(acc_handle, + &scsi_raid_io->RaidContext.ldTargetId), + + ddi_get16(acc_handle, &scsi_raid_io->DevHandle)); + + pkt->pkt_reason = CMD_DEV_GONE; + pkt->pkt_statistics = STAT_DISCON; + break; + case MFI_STAT_DEVICE_NOT_FOUND: + con_log(CL_ANN, (CE_CONT, + "tbolt_complete_cmd: device not found error")); + pkt->pkt_reason = CMD_DEV_GONE; + pkt->pkt_statistics = STAT_DISCON; + break; + + case MFI_STAT_LD_LBA_OUT_OF_RANGE: + pkt->pkt_state |= STATE_ARQ_DONE; + pkt->pkt_reason = CMD_CMPLT; + ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1; + + arqstat = (void *)(pkt->pkt_scbp); + arqstat->sts_rqpkt_reason = CMD_CMPLT; + arqstat->sts_rqpkt_resid = 0; + arqstat->sts_rqpkt_state |= STATE_GOT_BUS + | STATE_GOT_TARGET | STATE_SENT_CMD + | STATE_XFERRED_DATA; + *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; + + 
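/*
+ * Build the auto-request-sense data for this status
+ * locally: ILLEGAL REQUEST with the LBA OUT OF RANGE
+ * additional sense code filled in below.
+ */
+ 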
arqstat->sts_sensedata.es_valid = 1; + arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST; + arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; + + /* + * LOGICAL BLOCK ADDRESS OUT OF RANGE: + * ASC: 0x21h; ASCQ: 0x00h; + */ + arqstat->sts_sensedata.es_add_code = 0x21; + arqstat->sts_sensedata.es_qual_code = 0x00; + break; + case MFI_STAT_INVALID_CMD: + case MFI_STAT_INVALID_DCMD: + case MFI_STAT_INVALID_PARAMETER: + case MFI_STAT_INVALID_SEQUENCE_NUMBER: + default: + cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!"); + pkt->pkt_reason = CMD_TRAN_ERR; + + break; + } + + atomic_add_16(&instance->fw_outstanding, (-1)); + + (void) mrsas_common_check(instance, cmd); + if (acmd->cmd_dmahandle) { + if (mrsas_check_dma_handle(acmd->cmd_dmahandle) != + DDI_SUCCESS) { + ddi_fm_service_impact(instance->dip, + DDI_SERVICE_UNAFFECTED); + pkt->pkt_reason = CMD_TRAN_ERR; + pkt->pkt_statistics = 0; + } + } + + /* Call the callback routine */ + if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) + (*pkt->pkt_comp)(pkt); + + con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID)); + + ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0); + + ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0); + + return_raid_msg_pkt(instance, cmd); + break; + } + case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */ + + if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO && + cmd->frame->dcmd.mbox.b[1] == 1) { + + mutex_enter(&instance->sync_map_mtx); + + con_log(CL_ANN, (CE_NOTE, + "LDMAP sync command SMID RECEIVED 0x%X", + cmd->SMID)); + if (cmd->frame->hdr.cmd_status != 0) { + cmn_err(CE_WARN, + "map sync failed, status = 0x%x.", + cmd->frame->hdr.cmd_status); + } else { + instance->map_id++; + cmn_err(CE_NOTE, + "map sync received, switched map_id to %" + PRIu64 " \n", instance->map_id); + } + + if (MR_ValidateMapInfo(instance->ld_map[ + (instance->map_id & 1)], + instance->load_balance_info)) { + instance->fast_path_io = 1; + } else { + instance->fast_path_io = 0; + } + + con_log(CL_ANN, (CE_NOTE, + "instance->fast_path_io %d", + instance->fast_path_io)); + + instance->unroll.syncCmd = 0; + + if (instance->map_update_cmd == cmd) { + return_raid_msg_pkt(instance, cmd); + atomic_add_16(&instance->fw_outstanding, (-1)); + (void) mrsas_tbolt_sync_map_info(instance); + } + + cmn_err(CE_NOTE, "LDMAP sync completed."); + mutex_exit(&instance->sync_map_mtx); + break; + } + + if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) { + con_log(CL_ANN1, (CE_CONT, + "AEN command SMID RECEIVED 0x%X", + cmd->SMID)); + if ((instance->aen_cmd == cmd) && + (instance->aen_cmd->abort_aen)) { + con_log(CL_ANN, (CE_WARN, "mrsas_softintr: " + "aborted_aen returned")); + } else { + atomic_add_16(&instance->fw_outstanding, (-1)); + service_mfi_aen(instance, cmd); + } + } + + if (cmd->sync_cmd == MRSAS_TRUE) { + con_log(CL_ANN1, (CE_CONT, + "Sync-mode Command Response SMID RECEIVED 0x%X", + cmd->SMID)); + + tbolt_complete_cmd_in_sync_mode(instance, cmd); + } else { + con_log(CL_ANN, (CE_CONT, + "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X", + cmd->SMID)); + } + break; + default: + mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); + ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); + + /* free message */ + con_log(CL_ANN, + (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!")); + break; + } +} + +uint_t +mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance) +{ + uint8_t replyType; + Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc; + Mpi2ReplyDescriptorsUnion_t *desc; + 
uint16_t smid;
+ union desc_value d_val;
+ struct mrsas_cmd *cmd;
+
+ struct mrsas_header *hdr;
+ struct scsi_pkt *pkt;
+
+ (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORDEV);
+
+ (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORCPU);
+
+ desc = instance->reply_frame_pool;
+ desc += instance->reply_read_index;
+
+ replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+ replyType = replyDesc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ return (DDI_INTR_UNCLAIMED);
+
+ if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
+ != DDI_SUCCESS) {
+ mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ con_log(CL_ANN1,
+ (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
+ "FMA check failed, returning DDI_INTR_CLAIMED"));
+ return (DDI_INTR_CLAIMED);
+ }
+
+ con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
+ (void *)desc, desc->Words));
+
+ d_val.word = desc->Words;
+
+
+ /* Read Reply descriptor */
+ while ((d_val.u1.low != 0xffffffff) &&
+ (d_val.u1.high != 0xffffffff)) {
+
+ (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORCPU);
+
+ smid = replyDesc->SMID;
+
+ if (!smid || smid > instance->max_fw_cmds + 1) {
+ con_log(CL_ANN1, (CE_NOTE,
+ "Reply Desc at Break = %p Words = %" PRIx64,
+ (void *)desc, desc->Words));
+ break;
+ }
+
+ cmd = instance->cmd_list[smid - 1];
+ if (!cmd) {
+ con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
+ "outstanding_cmd: invalid command "
+ "or poll command received in completion path"));
+ } else {
+ mutex_enter(&instance->cmd_pend_mtx);
+ if (cmd->sync_cmd == MRSAS_TRUE) {
+ hdr = (struct mrsas_header *)&cmd->frame->hdr;
+ if (hdr) {
+ con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
+ "tbolt_process_outstanding_cmd:"
+ " mlist_del_init(&cmd->list)."));
+ mlist_del_init(&cmd->list);
+ }
+ } else {
+ pkt = cmd->pkt;
+ if (pkt) {
+ con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
+ "tbolt_process_outstanding_cmd:"
+ "mlist_del_init(&cmd->list)."));
+ mlist_del_init(&cmd->list);
+ }
+ }
+
+ mutex_exit(&instance->cmd_pend_mtx);
+
+ tbolt_complete_cmd(instance, cmd);
+ }
+ /* set it back to all 1s. */
+ desc->Words = -1LL;
+
+ instance->reply_read_index++;
+
+ if (instance->reply_read_index >= (instance->reply_q_depth)) {
+ con_log(CL_ANN1, (CE_NOTE, "wrap around"));
+ instance->reply_read_index = 0;
+ }
+
+ /* Get the next reply descriptor */
+ if (!instance->reply_read_index)
+ desc = instance->reply_frame_pool;
+ else
+ desc++;
+
+ replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ d_val.word = desc->Words;
+
+ con_log(CL_ANN1, (CE_NOTE,
+ "Next Reply Desc = %p Words = %" PRIx64,
+ (void *)desc, desc->Words));
+
+ replyType = replyDesc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+
+ } /* End of while loop. 
*/
+
+ /* update replyIndex to FW */
+ WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
+
+
+ (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORDEV);
+
+ (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORCPU);
+ return (DDI_INTR_CLAIMED);
+}
+
+
+
+
+/*
+ * tbolt_complete_cmd_in_sync_mode - Completes an internal command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ *
+ * tbolt_issue_cmd_in_sync_mode() waits for a command to complete
+ * after issuing it. This function wakes that waiting routine by
+ * broadcasting on the int_cmd_cv condition variable.
+ */
+void
+tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
+ struct mrsas_cmd *cmd)
+{
+
+ cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
+ &cmd->frame->io.cmd_status);
+
+ cmd->sync_cmd = MRSAS_FALSE;
+
+ mutex_enter(&instance->int_cmd_mtx);
+ if (cmd->cmd_status == ENODATA) {
+ cmd->cmd_status = 0;
+ }
+ cv_broadcast(&instance->int_cmd_cv);
+ mutex_exit(&instance->int_cmd_mtx);
+
+}
+
+/*
+ * mrsas_tbolt_get_ld_map_info - Fetch the FW's RAID map
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to read the controller's LD RAID
+ * map. The map is then validated and used to decide whether an I/O
+ * can be routed down the fast path.
+ */
+int
+mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
+{
+ int ret = 0;
+ struct mrsas_cmd *cmd = NULL;
+ struct mrsas_dcmd_frame *dcmd;
+ MR_FW_RAID_MAP_ALL *ci;
+ uint32_t ci_h = 0;
+ U32 size_map_info;
+
+ cmd = get_raid_msg_pkt(instance);
+
+ if (cmd == NULL) {
+ cmn_err(CE_WARN,
+ "Failed to get a cmd from free-pool in get_ld_map_info()");
+ return (DDI_FAILURE);
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_map_info = sizeof (MR_FW_RAID_MAP) +
+ (sizeof (MR_LD_SPAN_MAP) *
+ (MAX_LOGICAL_DRIVES - 1));
+
+ con_log(CL_ANN, (CE_NOTE,
+ "size_map_info : 0x%x", size_map_info));
+
+ ci = instance->ld_map[(instance->map_id & 1)];
+ ci_h = instance->ld_map_phy[(instance->map_id & 1)];
+
+ if (!ci) {
+ cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
+ return_raid_msg_pkt(instance, cmd);
+ return (-1);
+ }
+
+ bzero(ci, sizeof (*ci));
+ bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
+
+ dcmd->cmd = MFI_CMD_OP_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = size_map_info;
+ dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = ci_h;
+ dcmd->sgl.sge32[0].length = size_map_info;
+
+
+ mr_sas_tbolt_build_mfi_cmd(instance, cmd);
+
+ if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
+ ret = 0;
+ con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
+ } else {
+ cmn_err(CE_WARN, "Get LD Map Info failed");
+ ret = -1;
+ }
+
+ return_raid_msg_pkt(instance, cmd);
+
+ return (ret);
+}
+
+void
+mrsas_dump_reply_desc(struct mrsas_instance *instance)
+{
+ uint32_t i;
+ MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ union desc_value d_val;
+
+ reply_desc = instance->reply_frame_pool;
+
+ for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
+ d_val.word = reply_desc->Words;
+ con_log(CL_DLEVEL3, (CE_NOTE,
+ "i=%d, %x:%x",
+ i, d_val.u1.high, d_val.u1.low));
+ }
+}
+
+/*
+ * mrsas_tbolt_prepare_cdb - Prepare CDB for fast-path I/O.
+ * @io_info: MegaRAID IO request packet pointer.
+ * @ref_tag: Reference tag for RD/WRPROTECT
+ *
+ * Build the 32-byte variable-length CDB used for DIF-protected
+ * fast-path I/O. 
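+ *
+ * Roughly (per SBC-3) the CDB built below is laid out as:
+ *   byte  0      opcode 0x7F (VARIABLE LENGTH CDB)
+ *   byte  7      additional CDB length (0x18)
+ *   byte  9      service action (READ32/WRITE32)
+ *   byte  10     RD/WRPROTECT and cache control bits
+ *   bytes 12-19  64-bit logical block address, big-endian
+ *   bytes 28-31  transfer length in blocks, big-endian
+ * The expected reference and application tags are carried in the
+ * MPI2 EEDP fields of the request frame, not in CDB bytes 20-27.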
+ */ +void +mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[], + struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request, + U32 ref_tag) +{ + uint16_t EEDPFlags; + uint32_t Control; + ddi_acc_handle_t acc_handle = + instance->mpi2_frame_pool_dma_obj.acc_handle; + + /* Prepare 32-byte CDB if DIF is supported on this device */ + con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB")); + + bzero(cdb, 32); + + cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD; + + + cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN; + + if (io_info->isRead) + cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32; + else + cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32; + + /* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */ + cdb[10] = MRSAS_RD_WR_PROTECT; + + /* LOGICAL BLOCK ADDRESS */ + cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff); + cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff); + cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff); + cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff); + cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff); + cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff); + cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff); + cdb[19] = (U8)((io_info->pdBlock) & 0xff); + + /* Logical block reference tag */ + ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag, + BE_32(ref_tag)); + + ddi_put16(acc_handle, + &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff); + + ddi_put32(acc_handle, &scsi_io_request->DataLength, + ((io_info->numBlocks)*512)); + /* Specify 32-byte cdb */ + ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32); + + /* Transfer length */ + cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff); + cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff); + cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff); + cdb[31] = (U8)((io_info->numBlocks) & 0xff); + + /* set SCSI IO EEDPFlags */ + EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags); + Control = ddi_get32(acc_handle, &scsi_io_request->Control); + + /* set SCSI IO EEDPFlags bits */ + if (io_info->isRead) { + /* + * For READ commands, the EEDPFlags shall be set to specify to + * Increment the Primary Reference Tag, to Check the Reference + * Tag, and to Check and Remove the Protection Information + * fields. + */ + EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | + MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + } else { + /* + * For WRITE commands, the EEDPFlags shall be set to specify to + * Increment the Primary Reference Tag, and to Insert + * Protection Information fields. 
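The controller inserts
+ * the 8-byte protection tuple for each block, generating the
+ * guard and tag fields in flight. 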
+ */ + EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + } + Control |= (0x4 << 26); + + ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags); + ddi_put32(acc_handle, &scsi_io_request->Control, Control); + ddi_put32(acc_handle, + &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE); +} + + +/* + * mrsas_tbolt_set_pd_lba - Sets PD LBA + * @cdb: CDB + * @cdb_len: cdb length + * @start_blk: Start block of IO + * + * Used to set the PD LBA in CDB for FP IOs + */ +static void +mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk, + U32 num_blocks) +{ + U8 cdb_len = *cdb_len_ptr; + U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0; + + /* Some drives don't support 16/12 byte CDB's, convert to 10 */ + if (((cdb_len == 12) || (cdb_len == 16)) && + (start_blk <= 0xffffffff)) { + if (cdb_len == 16) { + con_log(CL_ANN, + (CE_NOTE, "Converting READ/WRITE(16) to READ10")); + opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; + flagvals = cdb[1]; + groupnum = cdb[14]; + control = cdb[15]; + } else { + con_log(CL_ANN, + (CE_NOTE, "Converting READ/WRITE(12) to READ10")); + opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; + flagvals = cdb[1]; + groupnum = cdb[10]; + control = cdb[11]; + } + + bzero(cdb, sizeof (cdb)); + + cdb[0] = opcode; + cdb[1] = flagvals; + cdb[6] = groupnum; + cdb[9] = control; + /* Set transfer length */ + cdb[8] = (U8)(num_blocks & 0xff); + cdb[7] = (U8)((num_blocks >> 8) & 0xff); + cdb_len = 10; + } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { + /* Convert to 16 byte CDB for large LBA's */ + con_log(CL_ANN, + (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB")); + switch (cdb_len) { + case 6: + opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; + control = cdb[5]; + break; + case 10: + opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16; + flagvals = cdb[1]; + groupnum = cdb[6]; + control = cdb[9]; + break; + case 12: + opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16; + flagvals = cdb[1]; + groupnum = cdb[10]; + control = cdb[11]; + break; + } + + bzero(cdb, sizeof (cdb)); + + cdb[0] = opcode; + cdb[1] = flagvals; + cdb[14] = groupnum; + cdb[15] = control; + + /* Transfer length */ + cdb[13] = (U8)(num_blocks & 0xff); + cdb[12] = (U8)((num_blocks >> 8) & 0xff); + cdb[11] = (U8)((num_blocks >> 16) & 0xff); + cdb[10] = (U8)((num_blocks >> 24) & 0xff); + + /* Specify 16-byte cdb */ + cdb_len = 16; + } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) { + /* convert to 10 byte CDB */ + opcode = cdb[0] == READ_6 ? 
READ_10 : WRITE_10; + control = cdb[5]; + + bzero(cdb, sizeof (cdb)); + cdb[0] = opcode; + cdb[9] = control; + + /* Set transfer length */ + cdb[8] = (U8)(num_blocks & 0xff); + cdb[7] = (U8)((num_blocks >> 8) & 0xff); + + /* Specify 10-byte cdb */ + cdb_len = 10; + } + + + /* Fall through Normal case, just load LBA here */ + switch (cdb_len) { + case 6: + { + U8 val = cdb[1] & 0xE0; + cdb[3] = (U8)(start_blk & 0xff); + cdb[2] = (U8)((start_blk >> 8) & 0xff); + cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f); + break; + } + case 10: + cdb[5] = (U8)(start_blk & 0xff); + cdb[4] = (U8)((start_blk >> 8) & 0xff); + cdb[3] = (U8)((start_blk >> 16) & 0xff); + cdb[2] = (U8)((start_blk >> 24) & 0xff); + break; + case 12: + cdb[5] = (U8)(start_blk & 0xff); + cdb[4] = (U8)((start_blk >> 8) & 0xff); + cdb[3] = (U8)((start_blk >> 16) & 0xff); + cdb[2] = (U8)((start_blk >> 24) & 0xff); + break; + + case 16: + cdb[9] = (U8)(start_blk & 0xff); + cdb[8] = (U8)((start_blk >> 8) & 0xff); + cdb[7] = (U8)((start_blk >> 16) & 0xff); + cdb[6] = (U8)((start_blk >> 24) & 0xff); + cdb[5] = (U8)((start_blk >> 32) & 0xff); + cdb[4] = (U8)((start_blk >> 40) & 0xff); + cdb[3] = (U8)((start_blk >> 48) & 0xff); + cdb[2] = (U8)((start_blk >> 56) & 0xff); + break; + } + + *cdb_len_ptr = cdb_len; +} + + +static int +mrsas_tbolt_check_map_info(struct mrsas_instance *instance) +{ + MR_FW_RAID_MAP_ALL *ld_map; + + if (!mrsas_tbolt_get_ld_map_info(instance)) { + + ld_map = instance->ld_map[(instance->map_id & 1)]; + + con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d", + ld_map->raidMap.ldCount, ld_map->raidMap.totalSize)); + + if (MR_ValidateMapInfo(instance->ld_map[ + (instance->map_id & 1)], instance->load_balance_info)) { + con_log(CL_ANN, + (CE_CONT, "MR_ValidateMapInfo success")); + + instance->fast_path_io = 1; + con_log(CL_ANN, + (CE_NOTE, "instance->fast_path_io %d", + instance->fast_path_io)); + + return (DDI_SUCCESS); + } + + } + + instance->fast_path_io = 0; + cmn_err(CE_WARN, "MR_ValidateMapInfo failed"); + con_log(CL_ANN, (CE_NOTE, + "instance->fast_path_io %d", instance->fast_path_io)); + + return (DDI_FAILURE); +} + +/* + * Marks HBA as bad. This will be called either when an + * IO packet times out even after 3 FW resets + * or FW is found to be fault even after 3 continuous resets. 
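+ * Once marked dead, MFI_STOP_ADP is written to the doorbell,
+ * interrupts are disabled, and all pending commands are completed
+ * with an error so the upper layers can fail over.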
+ */
+
+void
+mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
+{
+ cmn_err(CE_NOTE, "TBOLT Kill adapter called");
+
+ if (instance->deadadapter == 1)
+ return;
+
+ con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
+ "Writing to doorbell with MFI_STOP_ADP "));
+ mutex_enter(&instance->ocr_flags_mtx);
+ instance->deadadapter = 1;
+ mutex_exit(&instance->ocr_flags_mtx);
+ instance->func_ptr->disable_intr(instance);
+ WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
+ /* Flush */
+ (void) RD_RESERVED0_REGISTER(instance);
+
+ (void) mrsas_print_pending_cmds(instance);
+ (void) mrsas_complete_pending_cmds(instance);
+}
+
+void
+mrsas_reset_reply_desc(struct mrsas_instance *instance)
+{
+ int i;
+ MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ instance->reply_read_index = 0;
+
+ /* initializing reply address to 0xFFFFFFFF */
+ reply_desc = instance->reply_frame_pool;
+
+ for (i = 0; i < instance->reply_q_depth; i++) {
+ reply_desc->Words = (uint64_t)~0;
+ reply_desc++;
+ }
+}
+
+int
+mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
+{
+ uint32_t status = 0x00;
+ uint32_t retry = 0;
+ uint32_t cur_abs_reg_val;
+ uint32_t fw_state;
+ uint32_t abs_state;
+ uint32_t i;
+
+ con_log(CL_ANN, (CE_NOTE,
+ "mrsas_tbolt_reset_ppc entered"));
+
+ if (instance->deadadapter == 1) {
+ cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
+ "no more resets as HBA has been marked dead");
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&instance->ocr_flags_mtx);
+ instance->adapterresetinprogress = 1;
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "adapterresetinprogress flag set, time %llx", gethrtime()));
+ mutex_exit(&instance->ocr_flags_mtx);
+
+ instance->func_ptr->disable_intr(instance);
+
+ /* Add a delay in order to complete the in-flight ioctl and I/O cmds */
+ for (i = 0; i < 3000; i++) {
+ drv_usecwait(MILLISEC); /* wait for 1000 usecs */
+ }
+
+ instance->reply_read_index = 0;
+
+retry_reset:
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "Resetting TBOLT"));
+
+ WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
+ WR_TBOLT_IB_WRITE_SEQ(4, instance);
+ WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
+ WR_TBOLT_IB_WRITE_SEQ(2, instance);
+ WR_TBOLT_IB_WRITE_SEQ(7, instance);
+ WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
+ con_log(CL_ANN1, (CE_NOTE,
+ "mrsas_tbolt_reset_ppc: magic number written "
+ "to write sequence register"));
+ delay(100 * drv_usectohz(MILLISEC));
+ status = RD_TBOLT_HOST_DIAG(instance);
+ con_log(CL_ANN1, (CE_NOTE,
+ "mrsas_tbolt_reset_ppc: HOST DIAG register "
+ "read successfully"));
+
+ while (status & DIAG_TBOLT_RESET_ADAPTER) {
+ delay(100 * drv_usectohz(MILLISEC));
+ status = RD_TBOLT_HOST_DIAG(instance);
+ if (retry++ == 100) {
+ cmn_err(CE_WARN,
+ "mrsas_tbolt_reset_ppc: "
+ "reset adapter bit is still set; "
+ "retry count %d exceeded", retry);
+ return (DDI_FAILURE);
+ }
+ }
+
+ WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
+ delay(100 * drv_usectohz(MILLISEC));
+
+ ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
+ (uint8_t *)((uintptr_t)(instance)->regmap +
+ RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
+
+ while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
+ delay(100 * drv_usectohz(MILLISEC));
+ ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
+ (uint8_t *)((uintptr_t)(instance)->regmap +
+ RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
+ if (retry++ == 100) {
+ /* Don't call kill adapter here; */
+ /* the ADAPTER RESET bit is cleared by firmware */
+ /* mrsas_tbolt_kill_adapter(instance); */
+ cmn_err(CE_WARN,
+ "mr_sas %d: %s(): RESET FAILED; 
return failure.",
+ instance->instance, __func__);
+ return (DDI_FAILURE);
+ }
+ }
+
+ con_log(CL_ANN,
+ (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "Calling mfi_state_transition_to_ready"));
+
+ abs_state = instance->func_ptr->read_fw_status_reg(instance);
+ retry = 0;
+ while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
+ delay(100 * drv_usectohz(MILLISEC));
+ abs_state = instance->func_ptr->read_fw_status_reg(instance);
+ }
+ if (abs_state <= MFI_STATE_FW_INIT) {
+ cmn_err(CE_WARN,
+ "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT, "
+ "state = 0x%x; retrying reset.", abs_state);
+ goto retry_reset;
+ }
+
+ /* Mark HBA as bad, if FW is fault after 3 continuous resets */
+ if (mfi_state_transition_to_ready(instance) ||
+ debug_tbolt_fw_faults_after_ocr_g == 1) {
+ cur_abs_reg_val =
+ instance->func_ptr->read_fw_status_reg(instance);
+ fw_state = cur_abs_reg_val & MFI_STATE_MASK;
+
+ con_log(CL_ANN1, (CE_NOTE,
+ "mrsas_tbolt_reset_ppc: before fault injection: "
+ "FW is not ready, FW state = 0x%x", fw_state));
+ if (debug_tbolt_fw_faults_after_ocr_g == 1)
+ fw_state = MFI_STATE_FAULT;
+
+ con_log(CL_ANN,
+ (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready, "
+ "FW state = 0x%x", fw_state));
+
+ if (fw_state == MFI_STATE_FAULT) {
+ /* increment the count */
+ instance->fw_fault_count_after_ocr++;
+ if (instance->fw_fault_count_after_ocr
+ < MAX_FW_RESET_COUNT) {
+ cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
+ "FW is in fault after OCR, count %d; "
+ "retrying reset",
+ instance->fw_fault_count_after_ocr);
+ goto retry_reset;
+
+ } else {
+ cmn_err(CE_WARN, "mrsas %d: %s: "
+ "max reset count exceeded (>%d); "
+ "marking HBA bad and killing adapter",
+ instance->instance, __func__,
+ MAX_FW_RESET_COUNT);
+
+ mrsas_tbolt_kill_adapter(instance);
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ /* reset the counter as FW is up after OCR */
+ instance->fw_fault_count_after_ocr = 0;
+
+ mrsas_reset_reply_desc(instance);
+
+
+ con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "Calling mrsas_issue_init_mpi2"));
+ abs_state = mrsas_issue_init_mpi2(instance);
+ if (abs_state == (uint32_t)DDI_FAILURE) {
+ cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
+ "INIT failed; retrying reset");
+ goto retry_reset;
+ }
+ con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "mrsas_issue_init_mpi2 Done"));
+
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "Calling mrsas_print_pending_cmd"));
+ (void) mrsas_print_pending_cmds(instance);
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "mrsas_print_pending_cmd done"));
+
+ instance->func_ptr->enable_intr(instance);
+ instance->fw_outstanding = 0;
+
+ con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "Calling mrsas_issue_pending_cmds"));
+ (void) mrsas_issue_pending_cmds(instance);
+ con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "issue_pending_cmds done."));
+
+ con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "Calling aen registration"));
+
+ instance->aen_cmd->retry_count_for_ocr = 0;
+ instance->aen_cmd->drv_pkt_time = 0;
+
+ instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
+
+ con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag."));
+ mutex_enter(&instance->ocr_flags_mtx);
+ instance->adapterresetinprogress = 0;
+ mutex_exit(&instance->ocr_flags_mtx);
+ con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
+ "adapterresetinprogress flag unset"));
+
+ con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
+ return (DDI_SUCCESS);
+
+}
+
+
+/*
+ * 
mrsas_tbolt_sync_map_info - Keep the driver's LD map in sync with FW
+ * @instance: Adapter soft state
+ *
+ * Issues a pended internal command (DCMD) that the FW completes when
+ * the logical drive configuration changes. The completion handler,
+ * tbolt_complete_cmd(), then switches map_id, revalidates the new map
+ * and re-issues this command.
+ */
+
+static int
+mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
+{
+ int ret = 0, i;
+ struct mrsas_cmd *cmd = NULL;
+ struct mrsas_dcmd_frame *dcmd;
+ uint32_t size_sync_info, num_lds;
+ LD_TARGET_SYNC *ci = NULL;
+ MR_FW_RAID_MAP_ALL *map;
+ MR_LD_RAID *raid;
+ LD_TARGET_SYNC *ld_sync;
+ uint32_t ci_h = 0;
+ uint32_t size_map_info;
+
+ cmd = get_raid_msg_pkt(instance);
+
+ if (cmd == NULL) {
+ cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
+ "mrsas_tbolt_sync_map_info()");
+ return (DDI_FAILURE);
+ }
+
+ /* Clear the frame buffer and assign back the context id */
+ bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
+ cmd->index);
+ bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
+
+
+ map = instance->ld_map[instance->map_id & 1];
+
+ num_lds = map->raidMap.ldCount;
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
+
+ con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
+ size_sync_info, num_lds));
+
+ ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
+
+ bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
+ ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
+
+ bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
+
+ ld_sync = (LD_TARGET_SYNC *)ci;
+
+ for (i = 0; i < num_lds; i++, ld_sync++) {
+ raid = MR_LdRaidGet(i, map);
+
+ con_log(CL_ANN1,
+ (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
+ i, raid->seqNum, raid->flags.ldSyncRequired));
+
+ ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
+
+ con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
+ i, ld_sync->ldTargetId));
+
+ ld_sync->seqNum = raid->seqNum;
+ }
+
+
+ size_map_info = sizeof (MR_FW_RAID_MAP) +
+ (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
+
+ dcmd->cmd = MFI_CMD_OP_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = size_map_info;
+ ASSERT(num_lds <= 255);
+ dcmd->mbox.b[0] = (U8)num_lds;
+ dcmd->mbox.b[1] = 1; /* Pend */
+ dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = ci_h;
+ dcmd->sgl.sge32[0].length = size_map_info;
+
+
+ instance->map_update_cmd = cmd;
+ mr_sas_tbolt_build_mfi_cmd(instance, cmd);
+
+ instance->func_ptr->issue_cmd(cmd, instance);
+
+ instance->unroll.syncCmd = 1;
+ con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. 
[SMID]:%x", cmd->SMID)); + + return (ret); +} + +/* + * abort_syncmap_cmd + */ +int +abort_syncmap_cmd(struct mrsas_instance *instance, + struct mrsas_cmd *cmd_to_abort) +{ + int ret = 0; + + struct mrsas_cmd *cmd; + struct mrsas_abort_frame *abort_fr; + + con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__)); + + cmd = get_raid_msg_mfi_pkt(instance); + + if (!cmd) { + cmn_err(CE_WARN, + "Failed to get a cmd from free-pool abort_syncmap_cmd()."); + return (DDI_FAILURE); + } + /* Clear the frame buffer and assign back the context id */ + bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame)); + ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, + cmd->index); + + abort_fr = &cmd->frame->abort; + + /* prepare and issue the abort frame */ + ddi_put8(cmd->frame_dma_obj.acc_handle, + &abort_fr->cmd, MFI_CMD_OP_ABORT); + ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status, + MFI_CMD_STATUS_SYNC_MODE); + ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0); + ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context, + cmd_to_abort->index); + ddi_put32(cmd->frame_dma_obj.acc_handle, + &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr); + ddi_put32(cmd->frame_dma_obj.acc_handle, + &abort_fr->abort_mfi_phys_addr_hi, 0); + + cmd->frame_count = 1; + + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + + if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { + con_log(CL_ANN1, (CE_WARN, + "abort_ldsync_cmd: issue_cmd_in_poll_mode failed")); + ret = -1; + } else { + ret = 0; + } + + return_raid_msg_mfi_pkt(instance, cmd); + + atomic_add_16(&instance->fw_outstanding, (-1)); + + return (ret); +} + + +#ifdef PDSUPPORT +int +mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt, + uint8_t lun, dev_info_t **ldip) +{ + struct scsi_device *sd; + dev_info_t *child; + int rval, dtype; + struct mrsas_tbolt_pd_info *pds = NULL; + + con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d", + tgt, lun)); + + if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { + if (ldip) { + *ldip = child; + } + if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) { + rval = mrsas_service_evt(instance, tgt, 1, + MRSAS_EVT_UNCONFIG_TGT, NULL); + con_log(CL_ANN1, (CE_WARN, + "mr_sas:DELETING STALE ENTRY rval = %d " + "tgt id = %d", rval, tgt)); + return (NDI_FAILURE); + } + return (NDI_SUCCESS); + } + + pds = (struct mrsas_tbolt_pd_info *) + kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP); + mrsas_tbolt_get_pd_info(instance, pds, tgt); + dtype = pds->scsiDevType; + + /* Check for Disk */ + if ((dtype == DTYPE_DIRECT)) { + if ((dtype == DTYPE_DIRECT) && + (LE_16(pds->fwState) != PD_SYSTEM)) { + kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info)); + return (NDI_FAILURE); + } + sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); + sd->sd_address.a_hba_tran = instance->tran; + sd->sd_address.a_target = (uint16_t)tgt; + sd->sd_address.a_lun = (uint8_t)lun; + + if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) { + rval = mrsas_config_scsi_device(instance, sd, ldip); + con_log(CL_DLEVEL1, (CE_NOTE, + "Phys. device found: tgt %d dtype %d: %s", + tgt, dtype, sd->sd_inq->inq_vid)); + } else { + rval = NDI_FAILURE; + con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found " + "scsi_hba_probe Failed: tgt %d dtype %d: %s", + tgt, dtype, sd->sd_inq->inq_vid)); + } + + /* sd_unprobe is blank now. 
Free buffer manually */ + if (sd->sd_inq) { + kmem_free(sd->sd_inq, SUN_INQSIZE); + sd->sd_inq = (struct scsi_inquiry *)NULL; + } + kmem_free(sd, sizeof (struct scsi_device)); + rval = NDI_SUCCESS; + } else { + con_log(CL_ANN1, (CE_NOTE, + "Device not supported: tgt %d lun %d dtype %d", + tgt, lun, dtype)); + rval = NDI_FAILURE; + } + + kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info)); + con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d", + rval)); + return (rval); +} + +static void +mrsas_tbolt_get_pd_info(struct mrsas_instance *instance, + struct mrsas_tbolt_pd_info *pds, int tgt) +{ + struct mrsas_cmd *cmd; + struct mrsas_dcmd_frame *dcmd; + dma_obj_t dcmd_dma_obj; + + cmd = get_raid_msg_pkt(instance); + + if (!cmd) { + con_log(CL_ANN1, + (CE_WARN, "Failed to get a cmd for get pd info")); + return; + } + + /* Clear the frame buffer and assign back the context id */ + bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame)); + ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, + cmd->index); + + + dcmd = &cmd->frame->dcmd; + dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info); + dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; + dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; + dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; + dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; + dcmd_dma_obj.dma_attr.dma_attr_align = 1; + + (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, + DDI_STRUCTURE_LE_ACC); + bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info)); + bzero(dcmd->mbox.b, 12); + ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); + ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0); + ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); + ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, + MFI_FRAME_DIR_READ); + ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); + ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, + sizeof (struct mrsas_tbolt_pd_info)); + ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, + MR_DCMD_PD_GET_INFO); + ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt); + ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, + sizeof (struct mrsas_tbolt_pd_info)); + ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, + dcmd_dma_obj.dma_cookie[0].dmac_address); + + cmd->sync_cmd = MRSAS_TRUE; + cmd->frame_count = 1; + + if (instance->tbolt) { + mr_sas_tbolt_build_mfi_cmd(instance, cmd); + } + + instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd); + + ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds, + (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info), + DDI_DEV_AUTOINCR); + (void) mrsas_free_dma_obj(instance, dcmd_dma_obj); + return_raid_msg_pkt(instance, cmd); +} +#endif