view usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c @ 10281:422e069d2f7f

6793438 Direct attach tape fails to reconnect after library reboot or cable disconnect
6845887 vfs_mount panic at boot on a X4450 / snv_115
author Sukumar Swaminathan <Sukumar.Swaminathan@Sun.COM>
date Fri, 07 Aug 2009 15:01:35 -0700
parents 9b9d318575c5
children 65c1d51a12b9

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex.  All rights reserved.
 * Use is subject to License terms.
 */

#define	DEF_ICFG	1

#include <emlxs.h>
#include <emlxs_version.h>

char emlxs_revision[] = EMLXS_REVISION;
char emlxs_version[] = EMLXS_VERSION;
char emlxs_name[] = EMLXS_NAME;
char emlxs_label[] = EMLXS_LABEL;

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SOLARIS_C);

#ifdef MENLO_SUPPORT
static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* MENLO_SUPPORT */

static void	emlxs_fca_attach(emlxs_hba_t *hba);
static void	emlxs_fca_detach(emlxs_hba_t *hba);
static void	emlxs_drv_banner(emlxs_hba_t *hba);

static int32_t	emlxs_get_props(emlxs_hba_t *hba);
static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
static uint32_t emlxs_add_instance(int32_t ddiinst);
static void	emlxs_iodone(emlxs_buf_t *sbp);
static int	emlxs_pm_lower_power(dev_info_t *dip);
static int	emlxs_pm_raise_power(dev_info_t *dip);
static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
		    uint32_t failed);
static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
		    uint32_t args, uint32_t *arg);

#ifdef SLI3_SUPPORT
static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
#endif	/* SLI3_SUPPORT */


/*
 * Driver Entry Routines.
 */
static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
		    cred_t *, int32_t *);
static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);


/*
 * FC_AL Transport Functions.
 */
static opaque_t	emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *,
		    fc_fca_bind_info_t *);
static void	emlxs_unbind_port(opaque_t);
static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
static int32_t	emlxs_get_cap(opaque_t, char *, void *);
static int32_t	emlxs_set_cap(opaque_t, char *, void *);
static int32_t	emlxs_get_map(opaque_t, fc_lilpmap_t *);
static int32_t	emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t,
		    uint32_t *, uint32_t);
static int32_t	emlxs_ub_free(opaque_t, uint32_t, uint64_t *);

static opaque_t	emlxs_get_device(opaque_t, fc_portid_t);
static int32_t	emlxs_notify(opaque_t, uint32_t);
static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);

/*
 * Driver Internal Functions.
 */

static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
#ifdef EMLXS_I386
#ifdef S11
static int32_t	emlxs_quiesce(dev_info_t *);
#endif
#endif
static int32_t	emlxs_hba_resume(dev_info_t *);
static int32_t	emlxs_hba_suspend(dev_info_t *);
static int32_t	emlxs_hba_detach(dev_info_t *);
static int32_t	emlxs_hba_attach(dev_info_t *);
static void	emlxs_lock_destroy(emlxs_hba_t *);
static void	emlxs_lock_init(emlxs_hba_t *);
static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *,
			uint32_t, uint8_t);

char *emlxs_pm_components[] = {
	"NAME=emlxx000",
	"0=Device D3 State",
	"1=Device D0 State"
};
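
/*
 * These strings follow the pm-components(9P) property format; the trailing
 * digits in "emlxx000" are presumably a placeholder for the driver instance
 * number, filled in when the property is created during attach.
 */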


/*
 * Default emlx dma limits
 */
ddi_dma_lim_t emlxs_dma_lim = {
	(uint32_t)0,				/* dlim_addr_lo */
	(uint32_t)0xffffffff,			/* dlim_addr_hi */
	(uint_t)0x00ffffff,			/* dlim_cntr_max */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
	1,					/* dlim_minxfer */
	0x00ffffff				/* dlim_dmaspeed */
};

/*
 * Be careful when using these attributes; the defaults listed below are
 * (almost) the most general case, permitting allocation in almost any
 * way supported by the LightPulse family.  The sole exception is the
 * alignment specified as requiring memory allocation on a 4-byte boundary;
 * the LightPulse can DMA memory on any byte boundary.
 *
 * The LightPulse family currently is limited to 16M transfers;
 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
 */
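/*
 * For reference: 0x00ffffff is 16 MB - 1, so the dma_attr_count_max and
 * dma_attr_maxxfer values below carry the 16M transfer limit described above.
 */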
ddi_dma_attr_t emlxs_dma_attr = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

ddi_dma_attr_t emlxs_dma_attr_ro = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
};

ddi_dma_attr_t emlxs_dma_attr_1sg = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	1,					/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

#if (EMLXS_MODREV >= EMLXS_MODREV3)
ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};
#endif	/* >= EMLXS_MODREV3 */

/*
 * DDI access attributes for device
 */
ddi_device_acc_attr_t emlxs_dev_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
};

/*
 * DDI access attributes for data
 */
ddi_device_acc_attr_t emlxs_data_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
};

/*
 * Fill in the FC Transport structure,
 * as defined in the Fibre Channel Transport Programming Guide.
 */
#if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	0,				/* fca_num_npivports */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV5 */


#if (EMLXS_MODREV == EMLXS_MODREV4)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV4 */


#if (EMLXS_MODREV == EMLXS_MODREV3)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV3 */


#if (EMLXS_MODREV == EMLXS_MODREV2)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_2,		/* fca_version */
	MAX_VPORTS,			/* number of ports */
	sizeof (emlxs_buf_t),		/* pkt size */
	2048,				/* max cmds */
	&emlxs_dma_lim,			/* DMA limits */
	0,				/* iblock, to be filled in later */
	&emlxs_dma_attr,		/* dma attributes */
	&emlxs_data_acc_attr,		/* access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV2 */

/*
 * This is needed when the module gets loaded by the kernel
 * so ddi library calls get resolved.
 */
#ifndef MODSYM_SUPPORT
char   _depends_on[] = "misc/fctl";
#endif /* MODSYM_SUPPORT */
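
/*
 * When MODSYM_SUPPORT is defined, no static dependency is declared here;
 * the required fctl symbols (fc_fca_attach, fc_fca_detach, fc_fca_init)
 * are instead resolved at runtime via ddi_modopen()/ddi_modsym() in
 * emlxs_fca_modopen() below.
 */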

/*
 * state pointer which the implementation uses as a place to
 * hang a set of per-driver structures;
 *
 */
void		*emlxs_soft_state = NULL;

/*
 * Driver Global variables.
 */
int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */

emlxs_device_t  emlxs_device;

uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */


/*
 * Single private "global" lock used to gain access to
 * the hba_list and/or any other case where we need to be
 * single-threaded.
 */
uint32_t	emlxs_diag_state;

/*
 * CB ops vector.  Used for administration only.
 */
static struct cb_ops emlxs_cb_ops = {
	emlxs_open,	/* cb_open	*/
	emlxs_close,	/* cb_close	*/
	nodev,		/* cb_strategy	*/
	nodev,		/* cb_print	*/
	nodev,		/* cb_dump	*/
	nodev,		/* cb_read	*/
	nodev,		/* cb_write	*/
	emlxs_ioctl,	/* cb_ioctl	*/
	nodev,		/* cb_devmap	*/
	nodev,		/* cb_mmap	*/
	nodev,		/* cb_segmap	*/
	nochpoll,	/* cb_chpoll	*/
	ddi_prop_op,	/* cb_prop_op	*/
	0,		/* cb_stream	*/
#ifdef _LP64
	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#else
	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
#endif
	CB_REV,		/* rev		*/
	nodev,		/* cb_aread	*/
	nodev		/* cb_awrite	*/
};

static struct dev_ops emlxs_ops = {
	DEVO_REV,	/* rev */
	0,	/* refcnt */
	emlxs_info,	/* getinfo	*/
	nulldev,	/* identify	*/
	nulldev,	/* probe	*/
	emlxs_attach,	/* attach	*/
	emlxs_detach,	/* detach	*/
	nodev,		/* reset	*/
	&emlxs_cb_ops,	/* devo_cb_ops	*/
	NULL,		/* devo_bus_ops */
	emlxs_power,	/* power ops	*/
#ifdef EMLXS_I386
#ifdef S11
	emlxs_quiesce,	/* quiesce	*/
#endif
#endif
};

#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

#ifdef SAN_DIAG_SUPPORT
extern kmutex_t		sd_bucket_mutex;
extern sd_bucket_info_t	sd_bucket;
#endif /* SAN_DIAG_SUPPORT */

/*
 * Module linkage information for the kernel.
 */
static struct modldrv emlxs_modldrv = {
	&mod_driverops,	/* module type - driver */
	emlxs_name,	/* module name */
	&emlxs_ops,	/* driver ops */
};


/*
 * Driver module linkage structure
 */
static struct modlinkage emlxs_modlinkage = {
	MODREV_1,	/* ml_rev - must be MODREV_1 */
	&emlxs_modldrv,	/* ml_linkage */
	NULL	/* end of driver linkage */
};


/* We only need to add entries for non-default return codes. */
/* Entries do not need to be in order. */
/* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
/*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */

emlxs_xlat_err_t emlxs_iostat_tbl[] = {
/* 	{f/w code, pkt_state, pkt_reason, 	*/
/* 		pkt_expln, pkt_action}		*/

	/* 0x00 - Do not remove */
	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x01 - Do not remove */
	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x02 */
	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},

	/*
	 * This is a default entry.
	 * The real codes are written dynamically in emlxs_els.c
	 */
	/* 0x09 */
	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x10 */
	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x11 */
	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x04 */
	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x05 */
	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x06 */
	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},

	/* CLASS 2 only */
	/* 0x07 */
	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
};

#define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
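
/*
 * Lookups against emlxs_iostat_tbl[] amount to a linear scan of the
 * IOSTAT_MAX entries for a matching firmware code; when nothing matches,
 * the defaults noted above apply (FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
 * FC_EXPLN_NONE, FC_ACTION_RETRYABLE).  A minimal sketch, with "fw_code"
 * standing in for whatever the first emlxs_xlat_err_t member is named:
 *
 *	for (i = 0; i < IOSTAT_MAX; i++) {
 *		if (emlxs_iostat_tbl[i].fw_code == iostat)
 *			break;
 *	}
 *	entry = (i < IOSTAT_MAX) ? &emlxs_iostat_tbl[i] : NULL;
 */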


/* We only need to add entries for non-default return codes. */
/* Entries do not need to be in order. */
/* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
/*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE} */

emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
/*	{f/w code, pkt_state, pkt_reason,	*/
/*		pkt_expln, pkt_action}		*/

	/* 0x01 */
	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x02 */
	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x04 */
	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x05 */
	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x06 */
	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x07 */
	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x08 */
	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0B */
	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0D */
	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0E */
	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0F */
	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x11 */
	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x13 */
	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x14 */
	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x15 */
	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x16 */
	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x17 */
	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x18 */
	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x1A */
	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x21 */
	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Occurs at link down */
	/* 0x28 */
	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0xF0 */
	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
};

#define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))



emlxs_table_t emlxs_error_table[] = {
	{IOERR_SUCCESS, "No error."},
	{IOERR_MISSING_CONTINUE, "Missing continue."},
	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
	{IOERR_INTERNAL_ERROR, "Internal error."},
	{IOERR_INVALID_RPI, "Invalid RPI."},
	{IOERR_NO_XRI, "No XRI."},
	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
	{IOERR_XCHG_DROPPED, "Exchange dropped."},
	{IOERR_ILLEGAL_FIELD, "Illegal field."},
	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
	{IOERR_NO_RESOURCES, "No resources."},
	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
	{IOERR_ABORT_REQUESTED, "Abort requested."},
	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
	{IOERR_RING_RESET, "Ring reset."},
	{IOERR_LINK_DOWN, "Link down."},
	{IOERR_CORRUPTED_DATA, "Corrupted data."},
	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
	{IOERR_DUP_FRAME, "Duplicate frame."},
	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
	{IOERR_INSUF_BUFFER, "Buffer too small."},
	{IOERR_MISSING_SI, "ELS frame missing SI"},
	{IOERR_MISSING_ES, "Exhausted burst without ES"},
	{IOERR_INCOMP_XFER, "Transfer incomplete."},
	{IOERR_ABORT_TIMEOUT, "Abort timeout."}

};	/* emlxs_error_table */


emlxs_table_t emlxs_state_table[] = {
	{IOSTAT_SUCCESS, "Success."},
	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
	{IOSTAT_REMOTE_STOP, "Remote stop."},
	{IOSTAT_LOCAL_REJECT, "Local reject."},
	{IOSTAT_NPORT_RJT, "NPort reject."},
	{IOSTAT_FABRIC_RJT, "Fabric reject."},
	{IOSTAT_NPORT_BSY, "Nport busy."},
	{IOSTAT_FABRIC_BSY, "Fabric busy."},
	{IOSTAT_INTERMED_RSP, "Intermediate response."},
	{IOSTAT_LS_RJT, "LS reject."},
	{IOSTAT_CMD_REJECT,		"Cmd reject."},
	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
	{IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."},
	{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."},
	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
	{IOSTAT_DATA_OVERRUN,  "Data overrun."},

};	/* emlxs_state_table */


#ifdef MENLO_SUPPORT
emlxs_table_t emlxs_menlo_cmd_table[] = {
	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},

	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},

	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},

	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},

	{MENLO_CMD_RESET,		"MENLO_RESET"},
	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}

};	/* emlxs_menlo_cmd_table */

emlxs_table_t emlxs_menlo_rsp_table[] = {
	{MENLO_RSP_SUCCESS,		"SUCCESS"},
	{MENLO_ERR_FAILED,		"FAILED"},
	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
	{MENLO_ERR_BUSY,		"BUSY"},

};	/* emlxs_menlo_rsp_table */

#endif /* MENLO_SUPPORT */


emlxs_table_t emlxs_mscmd_table[] = {
	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
	{MS_GTIN, "MS_GTIN"},
	{MS_GIEL, "MS_GIEL"},
	{MS_GIET, "MS_GIET"},
	{MS_GDID, "MS_GDID"},
	{MS_GMID, "MS_GMID"},
	{MS_GFN, "MS_GFN"},
	{MS_GIELN, "MS_GIELN"},
	{MS_GMAL, "MS_GMAL"},
	{MS_GIEIL, "MS_GIEIL"},
	{MS_GPL, "MS_GPL"},
	{MS_GPT, "MS_GPT"},
	{MS_GPPN, "MS_GPPN"},
	{MS_GAPNL, "MS_GAPNL"},
	{MS_GPS, "MS_GPS"},
	{MS_GPSC, "MS_GPSC"},
	{MS_GATIN, "MS_GATIN"},
	{MS_GSES, "MS_GSES"},
	{MS_GPLNL, "MS_GPLNL"},
	{MS_GPLT, "MS_GPLT"},
	{MS_GPLML, "MS_GPLML"},
	{MS_GPAB, "MS_GPAB"},
	{MS_GNPL, "MS_GNPL"},
	{MS_GPNL, "MS_GPNL"},
	{MS_GPFCP, "MS_GPFCP"},
	{MS_GPLI, "MS_GPLI"},
	{MS_GNID, "MS_GNID"},
	{MS_RIELN, "MS_RIELN"},
	{MS_RPL, "MS_RPL"},
	{MS_RPLN, "MS_RPLN"},
	{MS_RPLT, "MS_RPLT"},
	{MS_RPLM, "MS_RPLM"},
	{MS_RPAB, "MS_RPAB"},
	{MS_RPFCP, "MS_RPFCP"},
	{MS_RPLI, "MS_RPLI"},
	{MS_DPL, "MS_DPL"},
	{MS_DPLN, "MS_DPLN"},
	{MS_DPLM, "MS_DPLM"},
	{MS_DPLML, "MS_DPLML"},
	{MS_DPLI, "MS_DPLI"},
	{MS_DPAB, "MS_DPAB"},
	{MS_DPALL, "MS_DPALL"}

};	/* emlxs_mscmd_table */


emlxs_table_t emlxs_ctcmd_table[] = {
	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
	{SLI_CTNS_GA_NXT, "GA_NXT"},
	{SLI_CTNS_GPN_ID, "GPN_ID"},
	{SLI_CTNS_GNN_ID, "GNN_ID"},
	{SLI_CTNS_GCS_ID, "GCS_ID"},
	{SLI_CTNS_GFT_ID, "GFT_ID"},
	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
	{SLI_CTNS_GPT_ID, "GPT_ID"},
	{SLI_CTNS_GID_PN, "GID_PN"},
	{SLI_CTNS_GID_NN, "GID_NN"},
	{SLI_CTNS_GIP_NN, "GIP_NN"},
	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
	{SLI_CTNS_GNN_IP, "GNN_IP"},
	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
	{SLI_CTNS_GID_FT, "GID_FT"},
	{SLI_CTNS_GID_PT, "GID_PT"},
	{SLI_CTNS_RPN_ID, "RPN_ID"},
	{SLI_CTNS_RNN_ID, "RNN_ID"},
	{SLI_CTNS_RCS_ID, "RCS_ID"},
	{SLI_CTNS_RFT_ID, "RFT_ID"},
	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
	{SLI_CTNS_RPT_ID, "RPT_ID"},
	{SLI_CTNS_RIP_NN, "RIP_NN"},
	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
	{SLI_CTNS_DA_ID, "DA_ID"},
	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */

};	/* emlxs_ctcmd_table */



emlxs_table_t emlxs_rmcmd_table[] = {
	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
	{CT_OP_GSAT, "RM_GSAT"},
	{CT_OP_GHAT, "RM_GHAT"},
	{CT_OP_GPAT, "RM_GPAT"},
	{CT_OP_GDAT, "RM_GDAT"},
	{CT_OP_GPST, "RM_GPST"},
	{CT_OP_GDP, "RM_GDP"},
	{CT_OP_GDPG, "RM_GDPG"},
	{CT_OP_GEPS, "RM_GEPS"},
	{CT_OP_GLAT, "RM_GLAT"},
	{CT_OP_SSAT, "RM_SSAT"},
	{CT_OP_SHAT, "RM_SHAT"},
	{CT_OP_SPAT, "RM_SPAT"},
	{CT_OP_SDAT, "RM_SDAT"},
	{CT_OP_SDP, "RM_SDP"},
	{CT_OP_SBBS, "RM_SBBS"},
	{CT_OP_RPST, "RM_RPST"},
	{CT_OP_VFW, "RM_VFW"},
	{CT_OP_DFW, "RM_DFW"},
	{CT_OP_RES, "RM_RES"},
	{CT_OP_RHD, "RM_RHD"},
	{CT_OP_UFW, "RM_UFW"},
	{CT_OP_RDP, "RM_RDP"},
	{CT_OP_GHDR, "RM_GHDR"},
	{CT_OP_CHD, "RM_CHD"},
	{CT_OP_SSR, "RM_SSR"},
	{CT_OP_RSAT, "RM_RSAT"},
	{CT_OP_WSAT, "RM_WSAT"},
	{CT_OP_RSAH, "RM_RSAH"},
	{CT_OP_WSAH, "RM_WSAH"},
	{CT_OP_RACT, "RM_RACT"},
	{CT_OP_WACT, "RM_WACT"},
	{CT_OP_RKT, "RM_RKT"},
	{CT_OP_WKT, "RM_WKT"},
	{CT_OP_SSC, "RM_SSC"},
	{CT_OP_QHBA, "RM_QHBA"},
	{CT_OP_GST, "RM_GST"},
	{CT_OP_GFTM, "RM_GFTM"},
	{CT_OP_SRL, "RM_SRL"},
	{CT_OP_SI, "RM_SI"},
	{CT_OP_SRC, "RM_SRC"},
	{CT_OP_GPB, "RM_GPB"},
	{CT_OP_SPB, "RM_SPB"},
	{CT_OP_RPB, "RM_RPB"},
	{CT_OP_RAPB, "RM_RAPB"},
	{CT_OP_GBC, "RM_GBC"},
	{CT_OP_GBS, "RM_GBS"},
	{CT_OP_SBS, "RM_SBS"},
	{CT_OP_GANI, "RM_GANI"},
	{CT_OP_GRV, "RM_GRV"},
	{CT_OP_GAPBS, "RM_GAPBS"},
	{CT_OP_APBC, "RM_APBC"},
	{CT_OP_GDT, "RM_GDT"},
	{CT_OP_GDLMI, "RM_GDLMI"},
	{CT_OP_GANA, "RM_GANA"},
	{CT_OP_GDLV, "RM_GDLV"},
	{CT_OP_GWUP, "RM_GWUP"},
	{CT_OP_GLM, "RM_GLM"},
	{CT_OP_GABS, "RM_GABS"},
	{CT_OP_SABS, "RM_SABS"},
	{CT_OP_RPR, "RM_RPR"},
	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */

};	/* emlxs_rmcmd_table */


emlxs_table_t emlxs_elscmd_table[] = {
	{ELS_CMD_ACC, "ACC"},
	{ELS_CMD_LS_RJT, "LS_RJT"},
	{ELS_CMD_PLOGI, "PLOGI"},
	{ELS_CMD_FLOGI, "FLOGI"},
	{ELS_CMD_LOGO, "LOGO"},
	{ELS_CMD_ABTX, "ABTX"},
	{ELS_CMD_RCS, "RCS"},
	{ELS_CMD_RES, "RES"},
	{ELS_CMD_RSS, "RSS"},
	{ELS_CMD_RSI, "RSI"},
	{ELS_CMD_ESTS, "ESTS"},
	{ELS_CMD_ESTC, "ESTC"},
	{ELS_CMD_ADVC, "ADVC"},
	{ELS_CMD_RTV, "RTV"},
	{ELS_CMD_RLS, "RLS"},
	{ELS_CMD_ECHO, "ECHO"},
	{ELS_CMD_TEST, "TEST"},
	{ELS_CMD_RRQ, "RRQ"},
	{ELS_CMD_PRLI, "PRLI"},
	{ELS_CMD_PRLO, "PRLO"},
	{ELS_CMD_SCN, "SCN"},
	{ELS_CMD_TPLS, "TPLS"},
	{ELS_CMD_GPRLO, "GPRLO"},
	{ELS_CMD_GAID, "GAID"},
	{ELS_CMD_FACT, "FACT"},
	{ELS_CMD_FDACT, "FDACT"},
	{ELS_CMD_NACT, "NACT"},
	{ELS_CMD_NDACT, "NDACT"},
	{ELS_CMD_QoSR, "QoSR"},
	{ELS_CMD_RVCS, "RVCS"},
	{ELS_CMD_PDISC, "PDISC"},
	{ELS_CMD_FDISC, "FDISC"},
	{ELS_CMD_ADISC, "ADISC"},
	{ELS_CMD_FARP, "FARP"},
	{ELS_CMD_FARPR, "FARPR"},
	{ELS_CMD_FAN, "FAN"},
	{ELS_CMD_RSCN, "RSCN"},
	{ELS_CMD_SCR, "SCR"},
	{ELS_CMD_LINIT, "LINIT"},
	{ELS_CMD_RNID, "RNID"},
	{ELS_CMD_AUTH, "AUTH"}

};	/* emlxs_elscmd_table */


/*
 *
 *	Device Driver Entry Routines
 *
 */

#ifdef MODSYM_SUPPORT
static void emlxs_fca_modclose();
static int  emlxs_fca_modopen();
emlxs_modsym_t emlxs_modsym;

static int
emlxs_fca_modopen()
{
	int err;

	if (emlxs_modsym.mod_fctl) {
		return (EEXIST);
	}

	/* Leadville (fctl) */
	err = 0;
	emlxs_modsym.mod_fctl =
	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
	if (!emlxs_modsym.mod_fctl) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
		    DRIVER_NAME, err);

		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_attach is present */
	emlxs_modsym.fc_fca_attach =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
	    &err);
	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_detach is present */
	emlxs_modsym.fc_fca_detach =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
	    &err);
	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_init is present */
	emlxs_modsym.fc_fca_init =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
		goto failed;
	}

	return (0);

failed:

	emlxs_fca_modclose();

	return (ENODEV);


} /* emlxs_fca_modopen() */


static void
emlxs_fca_modclose()
{
	if (emlxs_modsym.mod_fctl) {
		(void) ddi_modclose(emlxs_modsym.mod_fctl);
		emlxs_modsym.mod_fctl = 0;
	}

	emlxs_modsym.fc_fca_attach = NULL;
	emlxs_modsym.fc_fca_detach = NULL;
	emlxs_modsym.fc_fca_init   = NULL;

	return;

} /* emlxs_fca_modclose() */

#endif /* MODSYM_SUPPORT */



/*
 * Global driver initialization, called once when driver is loaded
 */
int
_init(void)
{
	int ret;
	char buf[64];

	/*
	 * First init call for this driver,
	 * so initialize the emlxs_dev_ctl structure.
	 */
	bzero(&emlxs_device, sizeof (emlxs_device));

#ifdef MODSYM_SUPPORT
	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
#endif /* MODSYM_SUPPORT */

	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);

	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
	emlxs_device.drv_timestamp = ddi_get_time();

	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
		emlxs_instance[ret] = (uint32_t)-1;
	}

	/*
	 * Provide for one ddiinst of the emlxs_dev_ctl structure
	 * for each possible board in the system.
	 */
	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
		cmn_err(CE_WARN,
		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
		    DRIVER_NAME, ret);

		return (ret);
	}

#ifdef MODSYM_SUPPORT
	/* Open SFS */
	(void) emlxs_fca_modopen();
#ifdef SFCT_SUPPORT
	/* Open FCT */
	(void) emlxs_fct_modopen();
#endif /* SFCT_SUPPORT */
#endif /* MODSYM_SUPPORT */

	/* Setup devops for SFS */
	MODSYM(fc_fca_init)(&emlxs_ops);

	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
		(void) ddi_soft_state_fini(&emlxs_soft_state);
#ifdef MODSYM_SUPPORT
		/* Close SFS */
		emlxs_fca_modclose();
#ifdef SFCT_SUPPORT
		/* Close FCT */
		emlxs_fct_modclose();
#endif /* SFCT_SUPPORT */
#endif /* MODSYM_SUPPORT */

		return (ret);
	}

#ifdef SAN_DIAG_SUPPORT
	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
#endif /* SAN_DIAG_SUPPORT */

	return (ret);

} /* _init() */


/*
 * Called when driver is unloaded.
 */
int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
		return (ret);
	}
#ifdef MODSYM_SUPPORT
	/* Close SFS */
	emlxs_fca_modclose();
#ifdef SFCT_SUPPORT
	/* Close FCT */
	emlxs_fct_modclose();
#endif /* SFCT_SUPPORT */
#endif /* MODSYM_SUPPORT */

	/*
	 * Destroy the soft state structure
	 */
	(void) ddi_soft_state_fini(&emlxs_soft_state);

	/* Destroy the global device lock */
	mutex_destroy(&emlxs_device.lock);

#ifdef SAN_DIAG_SUPPORT
	mutex_destroy(&sd_bucket_mutex);
#endif /* SAN_DIAG_SUPPORT */

	return (ret);

} /* _fini() */



int
_info(struct modinfo *modinfop)
{

	return (mod_info(&emlxs_modlinkage, modinfop));

} /* _info() */


/*
 * Attach a ddiinst of an emlx host adapter.
 * Allocate data structures, initialize the adapter and we're ready to fly.
 */
static int
emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	int ddiinst;
	int emlxinst;
	int rval;

	switch (cmd) {
	case DDI_ATTACH:
		/* If successful this will set EMLXS_PM_IN_ATTACH */
		rval = emlxs_hba_attach(dip);
		break;

	case DDI_PM_RESUME:
		/* This will resume the driver */
		rval = emlxs_pm_raise_power(dip);
		break;

	case DDI_RESUME:
		/* This will resume the driver */
		rval = emlxs_hba_resume(dip);
		break;

	default:
		rval = DDI_FAILURE;
	}

	if (rval == DDI_SUCCESS) {
		ddiinst = ddi_get_instance(dip);
		emlxinst = emlxs_get_instance(ddiinst);
		hba = emlxs_device.hba[emlxinst];

		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {

			/* Enable driver dump feature */
			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DUMP_SAFE;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	return (rval);

} /* emlxs_attach() */


/*
 * Detach/prepare driver to unload (see detach(9E)).
 */
static int
emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	int emlxinst;
	int rval;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];

	if (hba == NULL) {
		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);

		return (DDI_FAILURE);
	}

	if (hba == (emlxs_hba_t *)-1) {
		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
		    DRIVER_NAME);

		return (DDI_FAILURE);
	}

	port = &PPORT;
	rval = DDI_SUCCESS;

	/* Check driver dump */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->flag & FC_DUMP_ACTIVE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "emlxs_detach: Driver busy. Driver dump active.");

		return (DDI_FAILURE);
	}

	hba->flag &= ~FC_DUMP_SAFE;
	mutex_exit(&EMLXS_PORT_LOCK);

	switch (cmd) {
	case DDI_DETACH:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_DETACH");

		rval = emlxs_hba_detach(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to detach.");
		}
		break;


	case DDI_PM_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_PM_SUSPEND");

		/* This will suspend the driver */
		rval = emlxs_pm_lower_power(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to lower power.");
		}

		break;


	case DDI_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_SUSPEND");

		/* Suspend the driver */
		rval = emlxs_hba_suspend(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to suspend driver.");
		}
		break;


	default:
		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
		    DRIVER_NAME, cmd);
		rval = DDI_FAILURE;
	}

	if (rval == DDI_FAILURE) {
		/* Re-Enable driver dump feature */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_DUMP_SAFE;
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	return (rval);

} /* emlxs_detach() */


/* EMLXS_PORT_LOCK must be held when calling this */
extern void
emlxs_port_init(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;

	/* Initialize the base node */
	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
	port->node_base.nlp_Rpi = 0;
	port->node_base.nlp_DID = 0xffffff;
	port->node_base.nlp_list_next = NULL;
	port->node_base.nlp_list_prev = NULL;
	port->node_base.nlp_active = 1;
	port->node_base.nlp_base = 1;
	port->node_count = 0;

	if (!(port->flag & EMLXS_PORT_ENABLE)) {
		uint8_t dummy_wwn[8] =
		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
	}

	if (!(port->flag & EMLXS_PORT_CONFIG)) {
		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
	}

	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
	    sizeof (SERV_PARM));
	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
	    sizeof (NAME_TYPE));
	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
	    sizeof (NAME_TYPE));

	return;

} /* emlxs_port_init() */



/*
 * emlxs_bind_port
 *
 * Arguments:
 *
 * dip: the dev_info pointer for the ddiinst
 * port_info: pointer to info handed back to the transport
 * bind_info: pointer to info from the transport
 *
 * Return values: a port handle for this port, NULL for failure
 *
 */
static opaque_t
emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
    fc_fca_bind_info_t *bind_info)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_port_t *vport;
	int ddiinst;
	emlxs_vpd_t *vpd;
	emlxs_config_t *cfg;
	char *dptr;
	char buffer[16];
	uint32_t length;
	uint32_t len;
	char topology[32];
	char linkspeed[32];

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	ddiinst = hba->ddiinst;
	vpd = &VPD;
	cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (bind_info->port_num > 0) {
#if (EMLXS_MODREV >= EMLXS_MODREV5)
		if (!(hba->flag & FC_NPIV_ENABLED) ||
		    !(bind_info->port_npiv) ||
		    (bind_info->port_num > hba->vpi_max))
#elif (EMLXS_MODREV >= EMLXS_MODREV3)
		if (!(hba->flag & FC_NPIV_ENABLED) ||
		    (bind_info->port_num > hba->vpi_high))
#endif
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "emlxs_port_bind: Port %d not supported.",
			    bind_info->port_num);

			mutex_exit(&EMLXS_PORT_LOCK);

			port_info->pi_error = FC_OUTOFBOUNDS;
			return (NULL);
		}
	}

	/* Get true port pointer */
	port = &VPORT(bind_info->port_num);

	if (port->tgt_mode) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "emlxs_port_bind: Port %d is in target mode.",
		    bind_info->port_num);

		mutex_exit(&EMLXS_PORT_LOCK);

		port_info->pi_error = FC_OUTOFBOUNDS;
		return (NULL);
	}

	if (!port->ini_mode) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "emlxs_port_bind: Port %d is not in initiator mode.",
		    bind_info->port_num);

		mutex_exit(&EMLXS_PORT_LOCK);

		port_info->pi_error = FC_OUTOFBOUNDS;
		return (NULL);
	}

	/* Make sure the port is not already bound to the transport */
	if (port->flag & EMLXS_PORT_BOUND) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "emlxs_port_bind: Port %d already bound. flag=%x",
		    bind_info->port_num, port->flag);

		mutex_exit(&EMLXS_PORT_LOCK);

		port_info->pi_error = FC_ALREADY;
		return (NULL);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
	    bind_info->port_num, port_info, bind_info);

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	if (bind_info->port_npiv) {
		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
		if (port->snn[0] == 0) {
			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
			    256);
		}

		if (port->spn[0] == 0) {
			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
			    (caddr_t)hba->spn, port->vpi);
		}
		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
	}
#endif /* >= EMLXS_MODREV5 */

	/*
	 * Restricted login should apply to both physical and
	 * virtual ports.
	 */
	if (cfg[CFG_VPORT_RESTRICTED].current) {
		port->flag |= EMLXS_PORT_RESTRICTED;
	}

	/* Perform generic port initialization */
	emlxs_port_init(port);

	/* Perform SFS specific initialization */
	port->ulp_handle	= bind_info->port_handle;
	port->ulp_statec_cb	= bind_info->port_statec_cb;
	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
	port->ub_count		= EMLXS_UB_TOKEN_OFFSET;
	port->ub_pool		= NULL;

	/* Update the port info structure */

	/* Set the topology and state */
	if ((hba->state < FC_LINK_UP) ||
	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	}
#ifdef MENLO_SUPPORT
	else if (hba->flag & FC_MENLO_MODE) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	}
#endif /* MENLO_SUPPORT */
	else {
		/* Check for loop topology */
		if (hba->topology == TOPOLOGY_LOOP) {
			port_info->pi_port_state = FC_STATE_LOOP;
			(void) strcpy(topology, ", loop");

			if (hba->flag & FC_FABRIC_ATTACHED) {
				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
			} else {
				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
			}
		} else {
			port_info->pi_topology = FC_TOP_FABRIC;
			port_info->pi_port_state = FC_STATE_ONLINE;
			(void) strcpy(topology, ", fabric");
		}

		/* Set the link speed */
		switch (hba->linkspeed) {
		case 0:
			(void) strcpy(linkspeed, "Gb");
			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
			break;

		case LA_1GHZ_LINK:
			(void) strcpy(linkspeed, "1Gb");
			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
			break;
		case LA_2GHZ_LINK:
			(void) strcpy(linkspeed, "2Gb");
			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
			break;
		case LA_4GHZ_LINK:
			(void) strcpy(linkspeed, "4Gb");
			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
			break;
		case LA_8GHZ_LINK:
			(void) strcpy(linkspeed, "8Gb");
			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
			break;
		case LA_10GHZ_LINK:
			(void) strcpy(linkspeed, "10Gb");
			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
			break;
		default:
			(void) sprintf(linkspeed, "unknown(0x%x)",
			    hba->linkspeed);
			break;
		}

		/* Adjusting port context for link up messages */
		vport = port;
		port = &PPORT;
		if (vport->vpi == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
			    linkspeed, topology);
		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
			hba->flag |= FC_NPIV_LINKUP;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
			    "%s%s", linkspeed, topology);
		}
		port = vport;

	}

	/* Save initial state */
	port->ulp_statec = port_info->pi_port_state;

	/*
	 * The transport needs a copy of the common service parameters
	 * for this port. The transport can get any updates through
	 * the getcap entry point.
	 */
	bcopy((void *) &port->sparam,
	    (void *) &port_info->pi_login_params.common_service,
	    sizeof (SERV_PARM));

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	/* Swap the service parameters for ULP */
	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
	    common_service);
#endif /* EMLXS_MODREV2X */

	port_info->pi_login_params.common_service.btob_credit = 0xffff;

	bcopy((void *) &port->wwnn,
	    (void *) &port_info->pi_login_params.node_ww_name,
	    sizeof (NAME_TYPE));

	bcopy((void *) &port->wwpn,
	    (void *) &port_info->pi_login_params.nport_ww_name,
	    sizeof (NAME_TYPE));

	/*
	 * We need to turn off CLASS2 support.
	 * Otherwise, FC transport will use CLASS2 as default class
	 * and never try with CLASS3.
	 */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
#if (EMLXS_MODREVX >= EMLXS_MODREV3X)
	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
	}

	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
	}
#else	/* EMLXS_SPARC or EMLXS_MODREV2X */
	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
	}

	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
	}
#endif	/* >= EMLXS_MODREV3X */
#endif	/* >= EMLXS_MODREV3 */


#if (EMLXS_MODREV <= EMLXS_MODREV2)
	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
		port_info->pi_login_params.class_1.data[0] &= ~0x80;
	}

	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
		port_info->pi_login_params.class_2.data[0] &= ~0x80;
	}
#endif	/* <= EMLXS_MODREV2 */
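
	/*
	 * The bit cleared above is presumably the class-validity flag of the
	 * class 1/2 service parameters; the different masks (0x0080, 0x8000,
	 * 0x80) only reflect how that field is byte-ordered for each
	 * MODREV/ULP combination.
	 */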

	/* Additional parameters */
	port_info->pi_s_id.port_id = port->did;
	port_info->pi_s_id.priv_lilp_posit = 0;
	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;

	/* Initialize the RNID parameters */
	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));

	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);

	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
	port_info->pi_rnid_params.params.port_id    = port->did;
	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;

	/* Initialize the port attributes */
	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));

	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");

	port_info->pi_rnid_params.status = FC_SUCCESS;

	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);

	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
	    vpd->fw_version, vpd->fw_label);

#ifdef EMLXS_I386
	(void) sprintf(port_info->pi_attrs.option_rom_version,
	    "Boot:%s", vpd->boot_version);
#else	/* EMLXS_SPARC */
	(void) sprintf(port_info->pi_attrs.option_rom_version,
	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
#endif	/* EMLXS_I386 */


	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
	    emlxs_version, emlxs_revision);

	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);

	port_info->pi_attrs.vendor_specific_id =
	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);

	port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3);

	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;

#if (EMLXS_MODREV >= EMLXS_MODREV5)

	port_info->pi_rnid_params.params.num_attached = 0;

	/*
	 * Copy the serial number string (right most 16 chars) into the right
	 * justified local buffer
	 */
	bzero(buffer, sizeof (buffer));
	length = strlen(vpd->serial_num);
	len = (length > 16) ? 16 : length;
	bcopy(&vpd->serial_num[(length - len)],
	    &buffer[(sizeof (buffer) - len)], len);
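
	/*
	 * Example of the copy above (hypothetical serial number): a 10-byte
	 * serial "SN12345678" lands in buffer[6..15], with buffer[0..5] left
	 * as the zero bytes from the bzero() above, i.e. right justified in
	 * the 16-byte buffer.
	 */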

	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;

#endif /* >= EMLXS_MODREV5 */

#if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))

	port_info->pi_rnid_params.params.num_attached = 0;

	if (hba->flag & FC_NPIV_ENABLED) {
		uint8_t		byte;
		uint8_t		*wwpn;
		uint32_t	i;
		uint32_t	j;

		/* Copy the WWPN as a string into the local buffer */
		wwpn = (uint8_t *)&hba->wwpn;
		for (i = 0; i < 16; i++) {
			byte = *wwpn++;
			j = ((byte & 0xf0) >> 4);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}

			i++;
			j = (byte & 0xf);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}
		}

		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
	} else {
		/* Copy the serial number string (right most 16 chars) */
		/* into the right justified local buffer */
		bzero(buffer, sizeof (buffer));
		length = strlen(vpd->serial_num);
		len = (length > 16) ? 16 : length;
		bcopy(&vpd->serial_num[(length - len)],
		    &buffer[(sizeof (buffer) - len)], len);

		port_info->pi_attrs.hba_fru_details.port_index =
		    vpd->port_index;
	}

#endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */

#if (EMLXS_MODREV >= EMLXS_MODREV3)

	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
	dptr[0] = buffer[0];
	dptr[1] = buffer[1];
	dptr[2] = buffer[2];
	dptr[3] = buffer[3];
	dptr[4] = buffer[4];
	dptr[5] = buffer[5];
	dptr[6] = buffer[6];
	dptr[7] = buffer[7];
	port_info->pi_attrs.hba_fru_details.high =
	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high);

	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
	dptr[0] = buffer[8];
	dptr[1] = buffer[9];
	dptr[2] = buffer[10];
	dptr[3] = buffer[11];
	dptr[4] = buffer[12];
	dptr[5] = buffer[13];
	dptr[6] = buffer[14];
	dptr[7] = buffer[15];
	port_info->pi_attrs.hba_fru_details.low =
	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low);

#endif /* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV4)
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
#endif	/* >= EMLXS_MODREV4 */

	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);

	/* Set the hba speed limit */
	if (vpd->link_speed & LMT_10GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |=
		    FC_HBA_PORTSPEED_10GBIT;
	}
	if (vpd->link_speed & LMT_8GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
	}
	if (vpd->link_speed & LMT_4GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
	}
	if (vpd->link_speed & LMT_2GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
	}
	if (vpd->link_speed & LMT_1GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
	}

	/* Set the hba model info */
	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
	(void) strcpy(port_info->pi_attrs.model_description,
	    hba->model_info.model_desc);


	/* Log information */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_num           = %d", bind_info->port_num);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_handle        = %p", bind_info->port_handle);

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
#endif /* >= EMLXS_MODREV5 */

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_topology        = %x", port_info->pi_topology);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_error           = %x", port_info->pi_error);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_port_state      = %x", port_info->pi_port_state);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: priv_lilp_posit    = %x",
	    port_info->pi_s_id.priv_lilp_posit);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: hard_addr          = %x",
	    port_info->pi_hard_addr.hard_addr);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.status        = %x",
	    port_info->pi_rnid_params.status);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.global_id     = %16s",
	    port_info->pi_rnid_params.params.global_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.unit_type     = %x",
	    port_info->pi_rnid_params.params.unit_type);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.port_id       = %x",
	    port_info->pi_rnid_params.params.port_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.num_attached  = %x",
	    port_info->pi_rnid_params.params.num_attached);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.ip_version    = %x",
	    port_info->pi_rnid_params.params.ip_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.udp_port      = %x",
	    port_info->pi_rnid_params.params.udp_port);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.ip_addr       = %16s",
	    port_info->pi_rnid_params.params.ip_addr);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.spec_id_resv  = %x",
	    port_info->pi_rnid_params.params.specific_id_resv);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.topo_flags    = %x",
	    port_info->pi_rnid_params.params.topo_flags);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: manufacturer       = %s",
	    port_info->pi_attrs.manufacturer);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: serial_num         = %s",
	    port_info->pi_attrs.serial_number);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: model              = %s", port_info->pi_attrs.model);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: model_description  = %s",
	    port_info->pi_attrs.model_description);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: hardware_version   = %s",
	    port_info->pi_attrs.hardware_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: driver_version     = %s",
	    port_info->pi_attrs.driver_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: option_rom_version = %s",
	    port_info->pi_attrs.option_rom_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: firmware_version   = %s",
	    port_info->pi_attrs.firmware_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: driver_name        = %s",
	    port_info->pi_attrs.driver_name);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: vendor_specific_id = %x",
	    port_info->pi_attrs.vendor_specific_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: supported_cos      = %x",
	    port_info->pi_attrs.supported_cos);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: supported_speed    = %x",
	    port_info->pi_attrs.supported_speed);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: max_frame_size     = %x",
	    port_info->pi_attrs.max_frame_size);

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_port_index     = %x",
	    port_info->pi_attrs.hba_fru_details.port_index);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_high           = %llx",
	    port_info->pi_attrs.hba_fru_details.high);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_low            = %llx",
	    port_info->pi_attrs.hba_fru_details.low);
#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV4)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: sym_node_name      = %s",
	    port_info->pi_attrs.sym_node_name);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: sym_port_name      = %s",
	    port_info->pi_attrs.sym_port_name);
#endif	/* >= EMLXS_MODREV4 */

	/* Set the bound flag */
	port->flag |= EMLXS_PORT_BOUND;
	hba->num_of_ports++;

	mutex_exit(&EMLXS_PORT_LOCK);

	return ((opaque_t)port);

} /* emlxs_bind_port() */


static void
emlxs_unbind_port(opaque_t fca_port_handle)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	uint32_t count;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_unbind_port: port=%p", port);

	/* Check ub buffer pools */
	if (port->ub_pool) {
		mutex_enter(&EMLXS_UB_LOCK);

		/* Wait up to 10 seconds for all ub pools to be freed */
		count = 10 * 2;
		while (port->ub_pool && count) {
			mutex_exit(&EMLXS_UB_LOCK);
			delay(drv_usectohz(500000));	/* half second wait */
			count--;
			mutex_enter(&EMLXS_UB_LOCK);
		}

		if (port->ub_pool) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_unbind_port: Unsolicited buffers still "
			    "active. port=%p. Destroying...", port);

			/* Destroy all pools */
			while (port->ub_pool) {
				emlxs_ub_destroy(port, port->ub_pool);
			}
		}

		mutex_exit(&EMLXS_UB_LOCK);
	}

	/* Destroy & flush all port nodes, if they exist */
	if (port->node_count) {
		(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
	}
#if (EMLXS_MODREV >= EMLXS_MODREV5)
	if ((hba->flag & FC_NPIV_ENABLED) &&
	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
		(void) emlxs_mb_unreg_vpi(port);
	}
#endif

	mutex_enter(&EMLXS_PORT_LOCK);

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	port->flag &= ~EMLXS_PORT_BOUND;
	hba->num_of_ports--;

	port->ulp_handle = 0;
	port->ulp_statec = FC_STATE_OFFLINE;
	port->ulp_statec_cb = NULL;
	port->ulp_unsol_cb = NULL;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_unbind_port() */


/*ARGSUSED*/
extern int
emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t  *hba = HBA;
	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;

	if (!sbp) {
		return (FC_FAILURE);
	}

	bzero((void *)sbp, sizeof (emlxs_buf_t));

	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
	sbp->pkt_flags = PACKET_VALID | PACKET_RETURNED;
	sbp->port = port;
	sbp->pkt = pkt;
	sbp->iocbq.sbp = sbp;

	return (FC_SUCCESS);

} /* emlxs_pkt_init() */



static void
emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fc_packet_t *pkt = PRIV2PKT(sbp);
	uint32_t *iptr;

	mutex_enter(&sbp->mtx);

	/* Reinitialize */
	sbp->pkt   = pkt;
	sbp->port  = port;
	sbp->bmp   = NULL;
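	/* Keep only the allocation/validity flags; all other */
	/* per-I/O state flags are cleared for reuse */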
	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
	sbp->iotag = 0;
	sbp->ticks = 0;
	sbp->abort_attempts = 0;
	sbp->fpkt  = NULL;
	sbp->flush_count = 0;
	sbp->next  = NULL;

	if (!port->tgt_mode) {
		sbp->node  = NULL;
		sbp->did   = 0;
		sbp->lun   = 0;
		sbp->class = 0;
		sbp->ring  = NULL;
	}

	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
	sbp->iocbq.sbp = sbp;

	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
	    ddi_in_panic()) {
		sbp->pkt_flags |= PACKET_POLLED;
	}

	/* Prepare the fc packet */
	pkt->pkt_state = FC_PKT_SUCCESS;
	pkt->pkt_reason = 0;
	pkt->pkt_action = 0;
	pkt->pkt_expln = 0;
	pkt->pkt_data_resid = 0;
	pkt->pkt_resp_resid = 0;

	/* Make sure all pkt's have a proper timeout */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		/* This disables all IOCB on chip timeouts */
		pkt->pkt_timeout = 0x80000000;
	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
		pkt->pkt_timeout = 60;
	}

	/* Clear the response buffer */
	if (pkt->pkt_rsplen) {
		/* Check for FCP commands */
		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
			iptr = (uint32_t *)pkt->pkt_resp;
			iptr[2] = 0;
			iptr[3] = 0;
		} else {
			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
		}
	}

	mutex_exit(&sbp->mtx);

	return;

} /* emlxs_initialize_pkt() */



/*
 * We may not need this routine
 */
/*ARGSUSED*/
extern int
emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
{
	emlxs_buf_t  *sbp = PKT2PRIV(pkt);

	if (!sbp) {
		return (FC_FAILURE);
	}

	if (!(sbp->pkt_flags & PACKET_VALID)) {
		return (FC_FAILURE);
	}

	sbp->pkt_flags &= ~PACKET_VALID;
	mutex_destroy(&sbp->mtx);

	return (FC_SUCCESS);

} /* emlxs_pkt_uninit() */


static int
emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t  *hba = HBA;
	int32_t rval;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_CAP_ERROR);
	}

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_NODE_WWN");

		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_LOGIN_PARAMS");

		/*
		 * We need to turn off CLASS2 support.
		 * Otherwise, FC transport will use CLASS2 as default class
		 * and never try with CLASS3.
		 */
		hba->sparam.cls2.classValid = 0;

		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		int32_t		*num_bufs;
		emlxs_config_t	*cfg = &CFG;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
		    cfg[CFG_UB_BUFS].current);

		num_bufs = (int32_t *)ptr;

		/* We multiply by MAX_VPORTS because ULP uses a */
		/* formula to calculate ub bufs from this */
		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		int32_t		*size;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");

		size = (int32_t *)ptr;
		*size = -1;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		fc_reset_action_t *action;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");

		action = (fc_reset_action_t *)ptr;
		*action = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
		fc_dma_behavior_t *behavior;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");

		behavior = (fc_dma_behavior_t *)ptr;
		*behavior = FC_ALLOW_STREAMING;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		fc_fcp_dma_t   *fcp_dma;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_FCP_DMA");

		fcp_dma = (fc_fcp_dma_t *)ptr;
		*fcp_dma = FC_DVMA_SPACE;
		rval = FC_CAP_FOUND;

	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: Unknown capability. [%s]", cap);

		rval = FC_CAP_ERROR;

	}

	return (rval);

} /* emlxs_get_cap() */



static int
emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);

	return (FC_CAP_ERROR);

} /* emlxs_set_cap() */


static opaque_t
emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_get_device: did=%x", d_id);

	return (NULL);

} /* emlxs_get_device() */


static int32_t
emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
	    cmd);

	return (FC_SUCCESS);

} /* emlxs_notify */



static int
emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
{
	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t	*hba = HBA;
	uint32_t	lilp_length;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
	    port->alpa_map[3], port->alpa_map[4]);

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_NOMAP);
	}

	if (hba->topology != TOPOLOGY_LOOP) {
		return (FC_NOMAP);
	}

	/* Check if alpa map is available */
	if (port->alpa_map[0] != 0) {
		mapbuf->lilp_magic  = MAGIC_LILP;
	} else {	/* No LILP map available */

		/* Set lilp_magic to MAGIC_LISA and this will */
		/* trigger an ALPA scan in ULP */
		mapbuf->lilp_magic  = MAGIC_LISA;
	}

	mapbuf->lilp_myalpa = port->did;

	/* The first byte of the alpa_map is the lilp map length */
	/* Add one to include the lilp length byte itself */
	lilp_length = (uint32_t)port->alpa_map[0] + 1;

	/* Make sure the max transfer is 128 bytes */
	if (lilp_length > 128) {
		lilp_length = 128;
	}

	/* We start copying from the lilp_length field */
	/* in order to get a word aligned address */
	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
	    lilp_length);

	return (FC_SUCCESS);

} /* emlxs_get_map() */



extern int
emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
{
	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t	*hba = HBA;
	emlxs_buf_t	*sbp;
	uint32_t	rval;
	uint32_t	pkt_flags;

	/* Make sure adapter is online */
	if (!(hba->flag & FC_ONLINE_MODE)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Adapter offline.");

		return (FC_OFFLINE);
	}

	/* Validate packet */
	sbp = PKT2PRIV(pkt);

	/* Make sure ULP was told that the port was online */
	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Port offline.");

		return (FC_OFFLINE);
	}

	if (sbp->port != port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}

	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}
#ifdef SFCT_SUPPORT
	if (port->tgt_mode && !sbp->fct_cmd &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Packet blocked. Target mode.");
		return (FC_TRANSPORT_ERROR);
	}
#endif /* SFCT_SUPPORT */

#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif	/* IDLE_TIMER */

	/* Prepare the packet for transport */
	emlxs_initialize_pkt(port, sbp);

	/* Save a copy of the pkt flags. */
	/* We will check the polling flag later */
	pkt_flags = sbp->pkt_flags;

	/* Send the packet */
	switch (pkt->pkt_tran_type) {
	case FC_PKT_FCP_READ:
	case FC_PKT_FCP_WRITE:
		rval = emlxs_send_fcp_cmd(port, sbp);
		break;

	case FC_PKT_IP_WRITE:
	case FC_PKT_BROADCAST:
		rval = emlxs_send_ip(port, sbp);
		break;

	case FC_PKT_EXCHANGE:
		switch (pkt->pkt_cmd_fhdr.type) {
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fcp_cmd(port, sbp);
			break;

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct(port, sbp);
			break;

#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els(port, sbp);
		}
		break;

	case FC_PKT_OUTBOUND:
		switch (pkt->pkt_cmd_fhdr.type) {
#ifdef SFCT_SUPPORT
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fct_status(port, sbp);
			break;

		case FC_TYPE_BASIC_LS:
			rval = emlxs_send_fct_abort(port, sbp);
			break;
#endif /* SFCT_SUPPORT */

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct_rsp(port, sbp);
			break;
#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els_rsp(port, sbp);
		}
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
		rval = FC_TRANSPORT_ERROR;
		break;
	}

	/* Check if send was not successful */
	if (rval != FC_SUCCESS) {
		/* Return packet to ULP */
		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_RETURNED;
		mutex_exit(&sbp->mtx);

		return (rval);
	}

	/* Check if this packet should be polled for completion before */
	/* returning. This check must be done with a saved copy of the */
	/* pkt_flags because the packet itself could already be freed from */
	/* memory if it was not polled. */
	if (pkt_flags & PACKET_POLLED) {
		emlxs_poll(port, sbp);
	}

	return (FC_SUCCESS);

} /* emlxs_transport() */



static void
emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt = PRIV2PKT(sbp);
	clock_t		timeout;
	clock_t		time;
	uint32_t	att_bit;
	emlxs_ring_t	*rp;

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Check for panic situation */
	if (ddi_in_panic()) {
		/*
		 * In panic situations there will be one thread with
		 * no interrupts (hard or soft) and no timers
		 */

		/*
		 * We must manually poll everything in this thread
		 * to keep the driver going.
		 */
		rp = (emlxs_ring_t *)sbp->ring;
		switch (rp->ringno) {
		case FC_FCP_RING:
			att_bit = HA_R0ATT;
			break;

		case FC_IP_RING:
			att_bit = HA_R1ATT;
			break;

		case FC_ELS_RING:
			att_bit = HA_R2ATT;
			break;

		case FC_CT_RING:
			att_bit = HA_R3ATT;
			break;
		}

		/* Keep polling the chip until our IO is completed */
		/* Driver's timer will not function during panics. */
		/* Therefore, timer checks must be performed manually. */
		(void) drv_getparm(LBOLT, &time);
		timeout = time + drv_usectohz(1000000);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			emlxs_sli_poll_intr(hba, att_bit);
			(void) drv_getparm(LBOLT, &time);

			/* Trigger timer checks periodically */
			if (time >= timeout) {
				emlxs_timer_checks(hba);
				timeout = time + drv_usectohz(1000000);
			}
		}
	} else {
		/* Wait for IO completion */
		/* The driver's timer will detect */
		/* any timeout and abort the I/O. */
		mutex_enter(&EMLXS_PKT_LOCK);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
		}
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* Check for fcp reset pkt */
	if (sbp->pkt_flags & PACKET_FCP_RESET) {
		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, FC_FCP_RING);
			goto done;
		}

		/* Set the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, FC_FCP_RING);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Waiting...", sbp,
		    sbp->flush_count);

		/* Let's try this one more time */

		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		/* Reset the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, FC_FCP_RING);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting link.", sbp,
		    sbp->flush_count);

		/* Let's first try to reset the link */
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);

		if (sbp->flush_count == 0) {
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
		    sbp->flush_count);

		/* If that doesn't work, reset the adapter */
		(void) emlxs_reset(port, FC_FCA_RESET);

		if (sbp->flush_count != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
			    "sbp=%p flush_count=%d. Giving up.", sbp,
			    sbp->flush_count);
		}

	}
	/* PACKET_FCP_RESET */
done:

	/* Packet has been declared completed */
	/* and is now ready to be returned */

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	mutex_enter(&sbp->mtx);
	sbp->pkt_flags |= PACKET_RETURNED;
	mutex_exit(&sbp->mtx);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count--;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Make ULP completion callback if required */
	if (pkt->pkt_comp) {
		(*pkt->pkt_comp) (pkt);
	}

	return;

} /* emlxs_poll() */


static int
emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t		*hba = HBA;

	char			*err = NULL;
	emlxs_unsol_buf_t	*pool;
	emlxs_unsol_buf_t	*new_pool;
	int32_t			i;
	int			result;
	uint32_t		free_resv;
	uint32_t		free;
	emlxs_config_t		*cfg = &CFG;
	fc_unsol_buf_t		*ubp;
	emlxs_ub_priv_t		*ub_priv;

	if (port->tgt_mode) {
		if (tokens && count) {
			bzero(tokens, (sizeof (uint64_t) * (*count)));
		}
		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_alloc failed: Port not bound!  size=%x count=%d "
		    "type=%x", size, *count, type);

		return (FC_FAILURE);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);

	if (count && (*count > EMLXS_MAX_UBUFS)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "ub_alloc failed: Too many unsolicted buffers requested. "
		    "count=%x", *count);

		return (FC_FAILURE);

	}

	if (tokens == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "ub_alloc failed: Token array is NULL.");

		return (FC_FAILURE);
	}

	/* Clear the token array */
	bzero(tokens, (sizeof (uint64_t) * (*count)));

	free_resv = 0;
	free = *count;
	switch (type) {
	case FC_TYPE_BASIC_LS:
		err = "BASIC_LS";
		break;
	case FC_TYPE_EXTENDED_LS:
		err = "EXTENDED_LS";
		free = *count / 2;	/* Hold 50% for normal use */
		free_resv = *count - free;	/* Reserve 50% for RSCN use */
		break;
	case FC_TYPE_IS8802:
		err = "IS8802";
		break;
	case FC_TYPE_IS8802_SNAP:
		err = "IS8802_SNAP";

		if (cfg[CFG_NETWORK_ON].current == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: IP support is disabled.");

			return (FC_FAILURE);
		}
		break;
	case FC_TYPE_SCSI_FCP:
		err = "SCSI_FCP";
		break;
	case FC_TYPE_SCSI_GPP:
		err = "SCSI_GPP";
		break;
	case FC_TYPE_HIPP_FP:
		err = "HIPP_FP";
		break;
	case FC_TYPE_IPI3_MASTER:
		err = "IPI3_MASTER";
		break;
	case FC_TYPE_IPI3_SLAVE:
		err = "IPI3_SLAVE";
		break;
	case FC_TYPE_IPI3_PEER:
		err = "IPI3_PEER";
		break;
	case FC_TYPE_FC_SERVICES:
		err = "FC_SERVICES";
		break;
	}

	mutex_enter(&EMLXS_UB_LOCK);

	/*
	 * Walk through the list of the unsolicited buffers
	 * for this ddiinst of emlxs.
	 */

	pool = port->ub_pool;

	/*
	 * The emlxs_ub_alloc() can be called more than once with different
	 * sizes. We will reject the call if there is a duplicate size
	 * with the same FC-4 type.
	 */
	while (pool) {
		if ((pool->pool_type == type) &&
		    (pool->pool_buf_size == size)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: Unsolicited buffer pool for %s "
			    "of size 0x%x bytes already exists.", err, size);

			result = FC_FAILURE;
			goto fail;
		}

		pool = pool->pool_next;
	}

	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
	    KM_SLEEP);
	if (new_pool == NULL) {
		result = FC_FAILURE;
		goto fail;
	}

	new_pool->pool_next = NULL;
	new_pool->pool_type = type;
	new_pool->pool_buf_size = size;
	new_pool->pool_nentries = *count;
	new_pool->pool_available = new_pool->pool_nentries;
	new_pool->pool_free = free;
	new_pool->pool_free_resv = free_resv;
	new_pool->fc_ubufs =
	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);

	if (new_pool->fc_ubufs == NULL) {
		kmem_free(new_pool, sizeof (emlxs_unsol_buf_t));
		result = FC_FAILURE;
		goto fail;
	}

	new_pool->pool_first_token = port->ub_count;
	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
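	/* Note: pool_last_token is one past the last token assigned to */
	/* this pool (a half-open range); port->ub_count reaches this */
	/* value after the loop below */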

	for (i = 0; i < new_pool->pool_nentries; i++) {
		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
		ubp->ub_port_handle = port->ulp_handle;
		ubp->ub_token = (uint64_t)((unsigned long)ubp);
		ubp->ub_bufsize = size;
		ubp->ub_class = FC_TRAN_CLASS3;
		ubp->ub_port_private = NULL;
		ubp->ub_fca_private =
		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
		    KM_SLEEP);

		if (ubp->ub_fca_private == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: Unable to allocate fca_private "
			    "object.");

			result = FC_FAILURE;
			goto fail;
		}

		/*
		 * Initialize emlxs_ub_priv_t
		 */
		ub_priv = ubp->ub_fca_private;
		ub_priv->ubp = ubp;
		ub_priv->port = port;
		ub_priv->flags = EMLXS_UB_FREE;
		ub_priv->available = 1;
		ub_priv->pool = new_pool;
		ub_priv->time = 0;
		ub_priv->timeout = 0;
		ub_priv->token = port->ub_count;
		ub_priv->cmd = 0;

		/* Allocate the actual buffer */
		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);

		/* Check if the buffer allocation failed */
		if (ubp->ub_buffer == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: Unable to allocate buffer.");

			/* Free the private area of the current object */
			kmem_free(ubp->ub_fca_private,
			    sizeof (emlxs_ub_priv_t));

			result = FC_FAILURE;
			goto fail;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
		    ub_priv->token, ubp->ub_bufsize, type);

		tokens[i] = (uint64_t)((unsigned long)ubp);
		port->ub_count++;
	}

	/* Add the pool to the top of the pool list */
	new_pool->pool_prev = NULL;
	new_pool->pool_next = port->ub_pool;

	if (port->ub_pool) {
		port->ub_pool->pool_prev = new_pool;
	}
	port->ub_pool = new_pool;

	/* Set the post counts */
	if (type == FC_TYPE_IS8802_SNAP) {
		MAILBOXQ	*mbox;

		port->ub_post[FC_IP_RING] += new_pool->pool_nentries;

		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX | MEM_PRI))) {
			emlxs_mb_config_farp(hba, (MAILBOX *)mbox);
			if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mbox,
			    MBX_NOWAIT, 0) != MBX_BUSY) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
		port->flag |= EMLXS_PORT_IP_UP;
	} else if (type == FC_TYPE_EXTENDED_LS) {
		port->ub_post[FC_ELS_RING] += new_pool->pool_nentries;
	} else if (type == FC_TYPE_FC_SERVICES) {
		port->ub_post[FC_CT_RING] += new_pool->pool_nentries;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
	    *count, err, size);

	return (FC_SUCCESS);

fail:

	/* Clean the pool */
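	/* tokens[] was zeroed above and is filled sequentially, so */
	/* walking until the first zero entry visits exactly the */
	/* buffers that were successfully allocated */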
	for (i = 0; tokens[i] != NULL; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);

		/* Free the actual buffer */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));

		tokens[i] = 0;
		port->ub_count--;
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)new_pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));

	mutex_exit(&EMLXS_UB_LOCK);

	return (result);

} /* emlxs_ub_alloc() */


static void
emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t	*hba = HBA;
	emlxs_ub_priv_t	*ub_priv;
	fc_packet_t	*pkt;
	ELS_PKT		*els;
	uint32_t	sid;

	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

	if (hba->state <= FC_LINK_DOWN) {
		return;
	}

	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
		return;
	}

	sid = SWAP_DATA24_LO(ubp->ub_frame.s_id);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
	    "%s dropped: sid=%x. Rejecting.",
	    emlxs_elscmd_xlate(ub_priv->cmd), sid);

	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
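	/* 2 * R_A_TOV is the customary timeout for an ELS response */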
	pkt->pkt_timeout = (2 * hba->fc_ratov);

	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
	}

	/* Build the fc header */
	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
	pkt->pkt_cmd_fhdr.r_ctl =
	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
	pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did);
	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
	pkt->pkt_cmd_fhdr.f_ctl =
	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
	pkt->pkt_cmd_fhdr.seq_id = 0;
	pkt->pkt_cmd_fhdr.df_ctl = 0;
	pkt->pkt_cmd_fhdr.seq_cnt = 0;
	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
	pkt->pkt_cmd_fhdr.ro = 0;

	/* Build the command */
	els = (ELS_PKT *) pkt->pkt_cmd;
	els->elsCode = 0x01;
	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	els->un.lsRjt.un.b.vendorUnique = 0x02;

	/* Send the pkt later in another thread */
	(void) emlxs_pkt_send(pkt, 0);

	return;

} /* emlxs_ub_els_reject() */

extern int
emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
{
	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t		*hba = HBA;
	fc_unsol_buf_t		*ubp;
	emlxs_ub_priv_t		*ub_priv;
	uint32_t		i;
	uint32_t		time;
	emlxs_unsol_buf_t	*pool;

	if (count == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release: Nothing to do. count=%d", count);

		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release failed: Port not bound. count=%d token[0]=%p",
		    count, tokens[0]);

		return (FC_UNBOUND);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release failed: No pools! count=%d token[0]=%p",
		    count, tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	for (i = 0; i < count; i++) {
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release failed: count=%d tokens[%d]=0", count,
			    i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release failed: Dead buffer found. ubp=%p",
			    ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		if (ub_priv->flags == EMLXS_UB_FREE) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release: Buffer already free! ubp=%p token=%x",
			    ubp, ub_priv->token);

			continue;
		}

		/* Check for dropped els buffer */
		/* ULP will do this sometimes without sending a reply */
		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
			emlxs_ub_els_reject(port, ubp);
		}

		time = hba->timer_tics - ub_priv->time;
		ub_priv->time = 0;
		ub_priv->timeout = 0;

		pool = ub_priv->pool;

		/* Credit the correct free count while the RESV flag is */
		/* still visible; checking after the flags are cleared */
		/* would never see it */
		if (ub_priv->flags & EMLXS_UB_RESV) {
			pool->pool_free_resv++;
		} else {
			pool->pool_free++;
		}

		/* Mark the buffer free */
		ub_priv->flags = EMLXS_UB_FREE;
		bzero(ubp->ub_buffer, ubp->ub_bufsize);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
		    ubp, ub_priv->token, time, ub_priv->available,
		    pool->pool_nentries, pool->pool_available,
		    pool->pool_free, pool->pool_free_resv);

		/* Check if pool can be destroyed now */
		if ((pool->pool_available == 0) &&
		    (pool->pool_free + pool->pool_free_resv ==
		    pool->pool_nentries)) {
			emlxs_ub_destroy(port, pool);
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_ub_release() */


static int
emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
{
	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
	emlxs_unsol_buf_t	*pool;
	fc_unsol_buf_t		*ubp;
	emlxs_ub_priv_t		*ub_priv;
	uint32_t		i;

	if (port->tgt_mode) {
		return (FC_SUCCESS);
	}

	if (count == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free: Port not bound. count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free failed: No pools! count=%d token[0]=%p", count,
		    tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	/* Process buffer list */
	for (i = 0; i < count; i++) {
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_free failed: count=%d tokens[%d]=0", count,
			    i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		/* Mark buffer unavailable */
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_free failed: Dead buffer found. ubp=%p", ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv->available = 0;

		/* Mark one less buffer available in the parent pool */
		pool = ub_priv->pool;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
		    ub_priv->token, pool->pool_nentries,
		    pool->pool_available - 1, pool->pool_free,
		    pool->pool_free_resv);

		if (pool->pool_available) {
			pool->pool_available--;

			/* Check if pool can be destroyed */
			if ((pool->pool_available == 0) &&
			    (pool->pool_free + pool->pool_free_resv ==
			    pool->pool_nentries)) {
				emlxs_ub_destroy(port, pool);
			}
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_ub_free() */


/* EMLXS_UB_LOCK must be held when calling this routine */
extern void
emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
{
	emlxs_unsol_buf_t	*next;
	emlxs_unsol_buf_t	*prev;
	fc_unsol_buf_t		*ubp;
	uint32_t		i;

	/* Remove the pool object from the pool list */
	next = pool->pool_next;
	prev = pool->pool_prev;

	if (port->ub_pool == pool) {
		port->ub_pool = next;
	}

	if (prev) {
		prev->pool_next = next;
	}

	if (next) {
		next->pool_prev = prev;
	}

	pool->pool_prev = NULL;
	pool->pool_next = NULL;

	/* Clear the post counts */
	switch (pool->pool_type) {
	case FC_TYPE_IS8802_SNAP:
		port->ub_post[FC_IP_RING] -= pool->pool_nentries;
		break;

	case FC_TYPE_EXTENDED_LS:
		port->ub_post[FC_ELS_RING] -= pool->pool_nentries;
		break;

	case FC_TYPE_FC_SERVICES:
		port->ub_post[FC_CT_RING] -= pool->pool_nentries;
		break;
	}

	/* Now free the pool memory */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);

	/* Process the array of buffer objects in the pool */
	for (i = 0; i < pool->pool_nentries; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];

		/* Free the memory the buffer object represents */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));

	return;

} /* emlxs_ub_destroy() */


/*ARGSUSED*/
extern int
emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t	*hba = HBA;

	emlxs_buf_t	*sbp;
	NODELIST	*nlp;
	NODELIST	*prev_nlp;
	uint8_t		ringno;
	RING		*rp;
	clock_t		timeout;
	clock_t		time;
	int32_t		pkt_ret;
	IOCBQ		*iocbq;
	IOCBQ		*next;
	IOCBQ		*prev;
	uint32_t	found;
	uint32_t	att_bit;
	uint32_t	pass = 0;

	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
	iocbq = &sbp->iocbq;
	nlp = (NODELIST *)sbp->node;
	rp = (RING *)sbp->ring;
	ringno = (rp) ? rp->ringno : 0;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Port not bound.");
		return (FC_UNBOUND);
	}

	if (!(hba->flag & FC_ONLINE_MODE)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Adapter offline.");
		return (FC_OFFLINE);
	}

	/* ULP requires the aborted pkt to be completed */
	/* back to ULP before returning from this call. */
	/* SUN knows of problems with this call, so they suggested that we */
	/* always return FC_FAILURE for this call until it is worked out. */

	/* Check if pkt is no good */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_RETURNED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Tag this now */
	/* This will prevent any thread except ours from completing it */
	mutex_enter(&sbp->mtx);

	/* Check again if we still own this */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_RETURNED)) {
		mutex_exit(&sbp->mtx);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Check if pkt is a real polled command */
	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
	    (sbp->pkt_flags & PACKET_POLLED)) {
		mutex_exit(&sbp->mtx);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
		    sbp->pkt_flags);
		return (FC_FAILURE);
	}

	sbp->pkt_flags |= PACKET_POLLED;
	sbp->pkt_flags |= PACKET_IN_ABORT;

	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
	    PACKET_IN_TIMEOUT)) {
		mutex_exit(&sbp->mtx);

		/* Do nothing, pkt already on its way out */
		goto done;
	}

	mutex_exit(&sbp->mtx);

begin:
	pass++;

	mutex_enter(&EMLXS_RINGTX_LOCK);

	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		/* Find it on the queue */
		found = 0;
		if (iocbq->flag & IOCB_PRIORITY) {
			/* Search the priority queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					if (nlp->nlp_ptx[ringno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_ptx[ringno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_ptx[ringno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_ptx[ringno].q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_ptx[ringno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = next->next;
			}
		} else {
			/* Search the normal queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_tx[ringno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					if (nlp->nlp_tx[ringno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_tx[ringno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_tx[ringno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_tx[ringno].q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_tx[ringno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = (IOCBQ *) next->next;
			}
		}

		if (!found) {
			mutex_exit(&EMLXS_RINGTX_LOCK);
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
			    "I/O not found in driver. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
			goto done;
		}

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[ringno].q_first) ||
		    (nlp->nlp_tx[ringno].q_first &&
		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {

			/*
			 * If this is the base node,
			 * then don't shift the pointers
			 */
			/* We want to drain the base node before moving on */
			if (!nlp->nlp_base) {
				/* Just shift ring queue */
				/* pointers to next node */
				rp->nodeq.q_last = (void *) nlp;
				rp->nodeq.q_first = nlp->nlp_next[ringno];
			}
		} else {
			/* Remove node from ring queue */

			/* If this is the only node on list */
			if (rp->nodeq.q_first == (void *)nlp &&
			    rp->nodeq.q_last == (void *)nlp) {
				rp->nodeq.q_last = NULL;
				rp->nodeq.q_first = NULL;
				rp->nodeq.q_cnt = 0;
			} else if (rp->nodeq.q_first == (void *)nlp) {
				rp->nodeq.q_first = nlp->nlp_next[ringno];
				((NODELIST *) rp->nodeq.q_last)->
				    nlp_next[ringno] = rp->nodeq.q_first;
				rp->nodeq.q_cnt--;
			} else {
				/*
				 * This is a little more difficult: find the
				 * previous node in the circular ring queue
				 */
				prev_nlp = nlp;
				while (prev_nlp->nlp_next[ringno] != nlp) {
					prev_nlp = prev_nlp->nlp_next[ringno];
				}

				prev_nlp->nlp_next[ringno] =
				    nlp->nlp_next[ringno];

				if (rp->nodeq.q_last == (void *)nlp) {
					rp->nodeq.q_last = (void *)prev_nlp;
				}
				rp->nodeq.q_cnt--;

			}

			/* Clear node */
			nlp->nlp_next[ringno] = NULL;
		}

		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->ring_tx_count[ringno]--;
		}

		mutex_exit(&sbp->mtx);

		/* Free the ulpIoTag and the bmp */
		(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);

		mutex_exit(&EMLXS_RINGTX_LOCK);

		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_ABORT_REQUESTED, 1);

		goto done;
	}

	mutex_exit(&EMLXS_RINGTX_LOCK);


	/* Check the chip queue */
	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));

	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
	    (sbp == rp->fc_table[sbp->iotag])) {

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
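			/* Allow roughly 4 * R_A_TOV plus margin before */
			/* the driver timer forces completion of this I/O */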
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		} else {
			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, rp);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
			sbp->ticks = hba->timer_tics + 30;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		}

		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));

		/* Send this iocbq */
		if (iocbq) {
			emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
			iocbq = NULL;
		}

		goto done;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));

	/* Pkt was not on any queues */

	/* Check again if we still own this */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags &
	    (PACKET_RETURNED | PACKET_IN_COMPLETION |
	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
		goto done;
	}

	/* Apparently the pkt was not found.  Let's delay and try again */
	if (pass < 5) {
		delay(drv_usectohz(5000000));	/* 5 seconds */

		/* Check again if we still own this */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_RETURNED | PACKET_IN_COMPLETION |
		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
			goto done;
		}

		goto begin;
	}

force_it:

	/* Force the completion now */

	/* Unregister the pkt */
	(void) emlxs_unregister_pkt(rp, sbp->iotag, 1);

	/* Now complete it */
	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
	    1);

done:

	/* Now wait for the pkt to complete */
	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
		/* Set thread timeout */
		timeout = emlxs_timeout(hba, 30);

		/* Check for panic situation */
		if (ddi_in_panic()) {

			/*
			 * In panic situations there will be one thread with no
			 * interrupts (hard or soft) and no timers
			 */

			/*
			 * We must manually poll everything in this thread
			 * to keep the driver going.
			 */

			rp = (emlxs_ring_t *)sbp->ring;
			switch (rp->ringno) {
			case FC_FCP_RING:
				att_bit = HA_R0ATT;
				break;

			case FC_IP_RING:
				att_bit = HA_R1ATT;
				break;

			case FC_ELS_RING:
				att_bit = HA_R2ATT;
				break;

			case FC_CT_RING:
				att_bit = HA_R3ATT;
				break;
			}

			/* Keep polling the chip until our IO is completed */
			(void) drv_getparm(LBOLT, &time);
			while ((time < timeout) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				emlxs_sli_poll_intr(hba, att_bit);
				(void) drv_getparm(LBOLT, &time);
			}
		} else {
			/* Wait for IO completion or timeout */
			mutex_enter(&EMLXS_PKT_LOCK);
			pkt_ret = 0;
			while ((pkt_ret != -1) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				pkt_ret =
				    cv_timedwait(&EMLXS_PKT_CV,
				    &EMLXS_PKT_LOCK, timeout);
			}
			mutex_exit(&EMLXS_PKT_LOCK);
		}

		/* Check if timeout occurred. This is not good. */
		/* Something happened to our IO. */
		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			/* Force the completion now */
			goto force_it;
		}
	}
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	/* Check again if we still own this */
	if ((sbp->pkt_flags & PACKET_VALID) &&
	    !(sbp->pkt_flags & PACKET_RETURNED)) {
		mutex_enter(&sbp->mtx);
		if ((sbp->pkt_flags & PACKET_VALID) &&
		    !(sbp->pkt_flags & PACKET_RETURNED)) {
			sbp->pkt_flags |= PACKET_RETURNED;
		}
		mutex_exit(&sbp->mtx);
	}
#ifdef ULP_PATCH5
	return (FC_FAILURE);

#else
	return (FC_SUCCESS);

#endif	/* ULP_PATCH5 */


} /* emlxs_pkt_abort() */


extern int32_t
emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
{
	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t	*hba = HBA;
	int		rval;
	int		ret;
	clock_t		timeout;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset failed. Port not bound.");

		return (FC_UNBOUND);
	}

	switch (cmd) {
	case FC_FCA_LINK_RESET:

		if (!(hba->flag & FC_ONLINE_MODE) ||
		    (hba->state <= FC_LINK_DOWN)) {
			return (FC_SUCCESS);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Resetting Link.");

		mutex_enter(&EMLXS_LINKUP_LOCK);
		hba->linkup_wait_flag = TRUE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		if (emlxs_reset_link(hba, 1)) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			hba->linkup_wait_flag = FALSE;
			mutex_exit(&EMLXS_LINKUP_LOCK);

			return (FC_FAILURE);
		}

		mutex_enter(&EMLXS_LINKUP_LOCK);
		timeout = emlxs_timeout(hba, 60);
		ret = 0;
		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
			ret =
			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
			    timeout);
		}

		hba->linkup_wait_flag = FALSE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		if (ret == -1) {
			return (FC_FAILURE);
		}

		return (FC_SUCCESS);

	case FC_FCA_CORE:
#ifdef DUMP_SUPPORT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Core dump.");

		/* Schedule a USER dump */
		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);

		/* Wait for dump to complete */
		emlxs_dump_wait(hba);

		return (FC_SUCCESS);
#endif /* DUMP_SUPPORT */

	case FC_FCA_RESET:
	case FC_FCA_RESET_CORE:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Resetting Adapter.");

		rval = FC_SUCCESS;

		if (emlxs_offline(hba) == 0) {
			(void) emlxs_online(hba);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_reset: Adapter reset failed. Device busy.");

			rval = FC_DEVICE_BUSY;
		}

		return (rval);

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Unknown command. cmd=%x", cmd);

		break;
	}

	return (FC_FAILURE);

} /* emlxs_reset() */


extern uint32_t	emlxs_core_dump(emlxs_hba_t *hba, char *buffer,
			uint32_t size);
extern uint32_t emlxs_core_size(emlxs_hba_t *hba);

extern int
emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
{
	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t	*hba = HBA;
	int32_t		ret;
	emlxs_vpd_t	*vpd = &VPD;


	ret = FC_SUCCESS;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_UNBOUND);
	}


#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif	/* IDLE_TIMER */

	switch (pm->pm_cmd_code) {

	case FC_PORT_GET_FW_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FW_REV");

		(void) sprintf(buffer, "%s %s", hba->model_info.model,
		    vpd->fw_version);
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;

			break;
		}

		(void) strcpy(pm->pm_data_buf, buffer);
		break;
	}

	case FC_PORT_GET_FCODE_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FCODE_REV");

		/* Force update here just to be sure */
		emlxs_get_fcode_version(hba);

		(void) sprintf(buffer, "%s %s", hba->model_info.model,
		    vpd->fcode_version);
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;
			break;
		}

		(void) strcpy(pm->pm_data_buf, buffer);
		break;
	}

	case FC_PORT_GET_DUMP_SIZE:
	{
#ifdef DUMP_SUPPORT
		uint32_t dump_size = 0;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");

		if (pm->pm_data_len < sizeof (uint32_t)) {
			ret = FC_NOMEM;
			break;
		}

		(void) emlxs_get_dump(hba, NULL, &dump_size);

		*((uint32_t *)pm->pm_data_buf) = dump_size;

#else
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");

#endif /* DUMP_SUPPORT */

		break;
	}

	case FC_PORT_GET_DUMP:
	{
#ifdef DUMP_SUPPORT
		uint32_t dump_size = 0;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP");

		(void) emlxs_get_dump(hba, NULL, &dump_size);

		if (pm->pm_data_len < dump_size) {
			ret = FC_NOMEM;
			break;
		}

		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
		    (uint32_t *)&dump_size);
#else
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");

#endif /* DUMP_SUPPORT */

		break;
	}

	case FC_PORT_FORCE_DUMP:
	{
#ifdef DUMP_SUPPORT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_FORCE_DUMP");

		/* Schedule a USER dump */
		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);

		/* Wait for dump to complete */
		emlxs_dump_wait(hba);
#else
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");

#endif /* DUMP_SUPPORT */
		break;
	}

	case FC_PORT_LINK_STATE:
	{
		uint32_t	*link_state;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_LINK_STATE");

		if (pm->pm_stat_len != sizeof (*link_state)) {
			ret = FC_NOMEM;
			break;
		}

		if (pm->pm_cmd_buf != NULL) {
			/*
			 * Can't look beyond the FCA port.
			 */
			ret = FC_INVALID_REQUEST;
			break;
		}

		link_state = (uint32_t *)pm->pm_stat_buf;

		/* Set the state */
		if (hba->state >= FC_LINK_UP) {
			/* Check for loop topology */
			if (hba->topology == TOPOLOGY_LOOP) {
				*link_state = FC_STATE_LOOP;
			} else {
				*link_state = FC_STATE_ONLINE;
			}

			/* Set the link speed */
			switch (hba->linkspeed) {
			case LA_2GHZ_LINK:
				*link_state |= FC_STATE_2GBIT_SPEED;
				break;
			case LA_4GHZ_LINK:
				*link_state |= FC_STATE_4GBIT_SPEED;
				break;
			case LA_8GHZ_LINK:
				*link_state |= FC_STATE_8GBIT_SPEED;
				break;
			case LA_10GHZ_LINK:
				*link_state |= FC_STATE_10GBIT_SPEED;
				break;
			case LA_1GHZ_LINK:
			default:
				*link_state |= FC_STATE_1GBIT_SPEED;
				break;
			}
		} else {
			*link_state = FC_STATE_OFFLINE;
		}

		break;
	}


	case FC_PORT_ERR_STATS:
	case FC_PORT_RLS:
	{
		MAILBOX		*mb;
		fc_rls_acc_t	*bp;

		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");

		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
			ret = FC_NOMEM;
			break;
		}

		if ((mb = (MAILBOX *)emlxs_mem_get(hba,
		    MEM_MBOX | MEM_PRI)) == 0) {
			ret = FC_NOMEM;
			break;
		}

		emlxs_mb_read_lnk_stat(hba, mb);
		if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0)
		    != MBX_SUCCESS) {
			ret = FC_PBUSY;
		} else {
			bp = (fc_rls_acc_t *)pm->pm_data_buf;

			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
			bp->rls_invalid_word =
			    mb->un.varRdLnk.invalidXmitWord;
			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
		}

		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
		break;
	}

	case FC_PORT_DOWNLOAD_FW:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
		ret = emlxs_fw_download(hba, pm->pm_data_buf,
		    pm->pm_data_len, 1);
		break;

	case FC_PORT_DOWNLOAD_FCODE:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
		ret = emlxs_fw_download(hba, pm->pm_data_buf,
		    pm->pm_data_len, 1);
		break;

	case FC_PORT_DIAG:
	{
		uint32_t errno = 0;
		uint32_t did = 0;
		uint32_t pattern = 0;

		switch (pm->pm_cmd_flags) {
		case EMLXS_DIAG_BIU:

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DIAG_BIU");

			if (pm->pm_data_len) {
				pattern = *((uint32_t *)pm->pm_data_buf);
			}

			errno = emlxs_diag_biu_run(hba, pattern);

			if (pm->pm_stat_len == sizeof (errno)) {
				*(int *)pm->pm_stat_buf = errno;
			}

			break;


		case EMLXS_DIAG_POST:

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DIAG_POST");

			errno = emlxs_diag_post_run(hba);

			if (pm->pm_stat_len == sizeof (errno)) {
				*(int *)pm->pm_stat_buf = errno;
			}

			break;


		case EMLXS_DIAG_ECHO:

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DIAG_ECHO");

			if (pm->pm_cmd_len != sizeof (uint32_t)) {
				ret = FC_INVALID_REQUEST;
				break;
			}

			did = *((uint32_t *)pm->pm_cmd_buf);

			if (pm->pm_data_len) {
				pattern = *((uint32_t *)pm->pm_data_buf);
			}

			errno = emlxs_diag_echo_run(port, did, pattern);

			if (pm->pm_stat_len == sizeof (errno)) {
				*(int *)pm->pm_stat_buf = errno;
			}

			break;


		case EMLXS_PARM_GET_NUM:
		{
			uint32_t	*num;
			emlxs_config_t	*cfg;
			uint32_t	i;
			uint32_t	count;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_GET_NUM");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			num = (uint32_t *)pm->pm_stat_buf;
			count = 0;
			cfg = &CFG;
			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				if (!(cfg->flags & PARM_HIDDEN)) {
					count++;
				}

			}

			*num = count;

			break;
		}

		case EMLXS_PARM_GET_LIST:
		{
			emlxs_parm_t	*parm;
			emlxs_config_t	*cfg;
			uint32_t	i;
			uint32_t	max_count;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_GET_LIST");

			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				ret = FC_NOMEM;
				break;
			}

			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);

			parm = (emlxs_parm_t *)pm->pm_stat_buf;
			cfg = &CFG;
			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
			    cfg++) {
				if (!(cfg->flags & PARM_HIDDEN)) {
					(void) strcpy(parm->label, cfg->string);
					parm->min = cfg->low;
					parm->max = cfg->hi;
					parm->def = cfg->def;
					parm->current = cfg->current;
					parm->flags = cfg->flags;
					(void) strcpy(parm->help, cfg->help);
					parm++;
					max_count--;
				}
			}

			break;
		}

		case EMLXS_PARM_GET:
		{
			emlxs_parm_t	*parm_in;
			emlxs_parm_t	*parm_out;
			emlxs_config_t	*cfg;
			uint32_t	i;
			uint32_t	len;

			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "inbuf too small.");

				ret = FC_BADCMD;
				break;
			}

			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "outbuf too small");

				ret = FC_BADCMD;
				break;
			}

			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
			len = strlen(parm_in->label);
			cfg = &CFG;
			ret = FC_BADOBJECT;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_GET: %s",
			    parm_in->label);

			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				if (len == strlen(cfg->string) &&
				    (strcmp(parm_in->label,
				    cfg->string) == 0)) {
					(void) strcpy(parm_out->label,
					    cfg->string);
					parm_out->min = cfg->low;
					parm_out->max = cfg->hi;
					parm_out->def = cfg->def;
					parm_out->current = cfg->current;
					parm_out->flags = cfg->flags;
					(void) strcpy(parm_out->help,
					    cfg->help);

					ret = FC_SUCCESS;
					break;
				}
			}

			break;
		}

		case EMLXS_PARM_SET:
		{
			emlxs_parm_t	*parm_in;
			emlxs_parm_t	*parm_out;
			emlxs_config_t	*cfg;
			uint32_t	i;
			uint32_t	len;

			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "inbuf too small.");

				ret = FC_BADCMD;
				break;
			}

			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "outbuf too small");
				ret = FC_BADCMD;
				break;
			}

			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
			len = strlen(parm_in->label);
			cfg = &CFG;
			ret = FC_BADOBJECT;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
			    parm_in->label, parm_in->current,
			    parm_in->current);

			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				/* Find matching parameter string */
				if (len == strlen(cfg->string) &&
				    (strcmp(parm_in->label,
				    cfg->string) == 0)) {
					/* Attempt to update parameter */
					if (emlxs_set_parm(hba, i,
					    parm_in->current) == FC_SUCCESS) {
						(void) strcpy(parm_out->label,
						    cfg->string);
						parm_out->min = cfg->low;
						parm_out->max = cfg->hi;
						parm_out->def = cfg->def;
						parm_out->current =
						    cfg->current;
						parm_out->flags = cfg->flags;
						(void) strcpy(parm_out->help,
						    cfg->help);

						ret = FC_SUCCESS;
					}

					break;
				}
			}

			break;
		}

		case EMLXS_LOG_GET:
		{
			emlxs_log_req_t		*req;
			emlxs_log_resp_t	*resp;
			uint32_t		len;

			/* Check command size */
			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
				ret = FC_BADCMD;
				break;
			}

			/* Get the request */
			req = (emlxs_log_req_t *)pm->pm_cmd_buf;

			/* Calculate the response length from the request */
			len = sizeof (emlxs_log_resp_t) +
			    (req->count * MAX_LOG_MSG_LENGTH);

			/* Check the response buffer length */
			if (pm->pm_stat_len < len) {
				ret = FC_BADCMD;
				break;
			}

			/* Get the response pointer */
			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;

			/* Get the requested log entries */
			(void) emlxs_msg_log_get(hba, req, resp);

			ret = FC_SUCCESS;
			break;
		}

		case EMLXS_GET_BOOT_REV:
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_GET_BOOT_REV");

			if (pm->pm_stat_len < (strlen(hba->model_info.model) +
			    strlen(vpd->boot_version) + 2)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			(void) sprintf(pm->pm_stat_buf, "%s %s",
			    hba->model_info.model, vpd->boot_version);

			break;
		}

		case EMLXS_DOWNLOAD_BOOT:
			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");

			ret = emlxs_fw_download(hba, pm->pm_data_buf,
			    pm->pm_data_len, 1);
			break;

		case EMLXS_DOWNLOAD_CFL:
		{
			uint32_t *buffer;
			uint32_t region;
			uint32_t length;

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");

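			/*
			 * The download image carries a one-word header: the
			 * first 32-bit word selects the target region and the
			 * remaining bytes are the image data passed to
			 * emlxs_cfl_download().
			 */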
			/* Extract the region number from the first word. */
			buffer = (uint32_t *)pm->pm_data_buf;
			region = *buffer++;

			/* Adjust the image length for the header word */
			length = pm->pm_data_len - 4;

			ret =
			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
			    length);
			break;
		}

		case EMLXS_VPD_GET:
		{
			emlxs_vpd_desc_t	*vpd_out;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_VPD_GET");

			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
				ret = FC_BADCMD;
				break;
			}

			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));

			(void) strncpy(vpd_out->id, vpd->id,
			    sizeof (vpd_out->id));
			(void) strncpy(vpd_out->part_num, vpd->part_num,
			    sizeof (vpd_out->part_num));
			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
			    sizeof (vpd_out->eng_change));
			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
			    sizeof (vpd_out->manufacturer));
			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
			    sizeof (vpd_out->serial_num));
			(void) strncpy(vpd_out->model, vpd->model,
			    sizeof (vpd_out->model));
			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
			    sizeof (vpd_out->model_desc));
			(void) strncpy(vpd_out->port_num, vpd->port_num,
			    sizeof (vpd_out->port_num));
			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
			    sizeof (vpd_out->prog_types));

			ret = FC_SUCCESS;

			break;
		}

		case EMLXS_GET_FCIO_REV:
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_GET_FCIO_REV");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;

			break;
		}

		case EMLXS_GET_DFC_REV:
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_GET_DFC_REV");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			*(uint32_t *)pm->pm_stat_buf = DFC_REV;

			break;
		}

		case EMLXS_SET_BOOT_STATE:
		case EMLXS_SET_BOOT_STATE_old:
		{
			uint32_t	state;

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			if (pm->pm_cmd_len < sizeof (uint32_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
				ret = FC_BADCMD;
				break;
			}

			state = *(uint32_t *)pm->pm_cmd_buf;

			if (state == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
				    "Disable");
				ret = emlxs_boot_code_disable(hba);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
				    "Enable");
				ret = emlxs_boot_code_enable(hba);
			}

			break;
		}

		case EMLXS_GET_BOOT_STATE:
		case EMLXS_GET_BOOT_STATE_old:
		{
			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_GET_BOOT_STATE");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}
			bzero(pm->pm_stat_buf, pm->pm_stat_len);

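			/*
			 * A return of FC_SUCCESS from emlxs_boot_code_state()
			 * is reported as state 1 (enabled); FC_FAILURE is
			 * mapped to state 0 (from the bzero above) and the
			 * request itself still succeeds.  Any other return is
			 * passed back to the caller as an error.
			 */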
			ret = emlxs_boot_code_state(hba);

			if (ret == FC_SUCCESS) {
				*(uint32_t *)pm->pm_stat_buf = 1;
				ret = FC_SUCCESS;
			} else if (ret == FC_FAILURE) {
				ret = FC_SUCCESS;
			}

			break;
		}

		case EMLXS_HW_ERROR_TEST:
		{
			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_HW_ERROR_TEST");

			/* Trigger a mailbox timeout */
			hba->mbox_timer = hba->timer_tics;

			break;
		}

		case EMLXS_TEST_CODE:
		{
			uint32_t *cmd;

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_TEST_CODE");

			if (pm->pm_cmd_len < sizeof (uint32_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_TEST_CODE. "
				    "inbuf to small.");

				ret = FC_BADCMD;
				break;
			}

			cmd = (uint32_t *)pm->pm_cmd_buf;

			ret = emlxs_test(hba, cmd[0],
			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);

			break;
		}

		default:

			ret = FC_INVALID_REQUEST;
			break;
		}

		break;

	}

	case FC_PORT_INITIALIZE:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_INITIALIZE");
		break;

	case FC_PORT_LOOPBACK:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_LOOPBACK");
		break;

	case FC_PORT_BYPASS:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_BYPASS");
		ret = FC_INVALID_REQUEST;
		break;

	case FC_PORT_UNBYPASS:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_UNBYPASS");
		ret = FC_INVALID_REQUEST;
		break;

	case FC_PORT_GET_NODE_ID:
	{
		fc_rnid_t *rnid;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_NODE_ID");

		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
			ret = FC_NOMEM;
			break;
		}

		rnid = (fc_rnid_t *)pm->pm_data_buf;

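		/* Format the port WWN as a hex string into global_id */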
		(void) sprintf((char *)rnid->global_id,
		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);

		rnid->unit_type  = RNID_HBA;
		rnid->port_id    = port->did;
		rnid->ip_version = RNID_IPV4;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);

		ret = FC_SUCCESS;
		break;
	}

	case FC_PORT_SET_NODE_ID:
	{
		fc_rnid_t *rnid;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_SET_NODE_ID");

		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
			ret = FC_NOMEM;
			break;
		}

		rnid = (fc_rnid_t *)pm->pm_data_buf;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);

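		/*
		 * The supplied values are only logged here; the request is
		 * acknowledged with FC_SUCCESS.
		 */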
		ret = FC_SUCCESS;
		break;
	}

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: code=%x", pm->pm_cmd_code);
		ret = FC_INVALID_REQUEST;
		break;

	}

	return (ret);

} /* emlxs_port_manage() */


/*ARGSUSED*/
static uint32_t
emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
    uint32_t *arg)
{
	uint32_t rval = 0;
	emlxs_port_t   *port = &PPORT;

	switch (test_code) {
#ifdef TEST_SUPPORT
	case 1: /* SCSI underrun */
	{
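		/*
		 * Arm the SCSI underrun test: arg[0], if supplied, sets the
		 * underrun counter; otherwise it defaults to 1.
		 */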
		hba->underrun_counter = (args)? arg[0]:1;
		break;
	}
#endif /* TEST_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
		rval = FC_INVALID_REQUEST;
	}

	return (rval);

} /* emlxs_test() */


/*
 * Given the device number, return the associated devinfo pointer
 * (DDI_INFO_DEVT2DEVINFO) or the driver instance number
 * (DDI_INFO_DEVT2INSTANCE).
 *
 * Note: this routine must succeed for DDI_INFO_DEVT2INSTANCE even
 * before attach completes.
 */
/*ARGSUSED*/
static int
emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	emlxs_hba_t	*hba;
	int32_t		ddiinst;

	ddiinst = getminor((dev_t)arg);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
		if (hba)
			*result = hba->dip;
		else
			*result = NULL;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)((unsigned long)ddiinst);
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_info() */


static int32_t
emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	int32_t		ddiinst;
	int		rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_power: comp=%x level=%x", comp, level);

	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
		return (DDI_FAILURE);
	}

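	/* hba->pm_lock serializes changes to pm_level and pm_state */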
	mutex_enter(&hba->pm_lock);

	/* If we are already at the proper level then return success */
	if (hba->pm_level == level) {
		mutex_exit(&hba->pm_lock);
		return (DDI_SUCCESS);
	}

	switch (level) {
	case EMLXS_PM_ADAPTER_UP:

		/*
		 * If we are already in emlxs_attach,
		 * let emlxs_hba_attach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;
			break;
		}

		/* Check if adapter is suspended */
		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;

			/* Try to resume the port */
			rval = emlxs_hba_resume(dip);

			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			}
			break;
		}

		/* Set adapter up */
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		break;

	case EMLXS_PM_ADAPTER_DOWN:


		/*
		 * If we are already in emlxs_detach,
		 * let emlxs_hba_detach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			break;
		}

		/* Check if adapter is not suspended */
		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;

			/* Try to suspend the port */
			rval = emlxs_hba_suspend(dip);

			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_UP;
			}

			break;
		}

		/* Set adapter down */
		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
		break;

	default:
		rval = DDI_FAILURE;
		break;

	}

	mutex_exit(&hba->pm_lock);

	return (rval);

} /* emlxs_power() */


#ifdef EMLXS_I386
#ifdef S11
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high PIL
 * with preemption disabled.  Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
emlxs_quiesce(dev_info_t *dip)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	int32_t		ddiinst;
	int		rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	if (hba == NULL || port == NULL) {
		return (DDI_FAILURE);
	}

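	/*
	 * Reset the adapter to quiet interrupts and DMA.  quiesce(9E) runs
	 * single-threaded, so no locks are taken here.
	 */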
	if (emlxs_sli_hba_reset(hba, 1, 1) == 0) {
		return (rval);
	} else {
		return (DDI_FAILURE);
	}

} /* emlxs_quiesce */
#endif
#endif /* EMLXS_I386 */


static int
emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	int		ddiinst;

	ddiinst = getminor(*dev_p);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	port = &PPORT;

	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "open failed: Driver suspended.");
		return (ENXIO);
	}

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	if (drv_priv(cred_p)) {
		return (EPERM);
	}

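	/*
	 * EMLXS_IOCTL_LOCK protects the open flags: an existing exclusive
	 * open rejects any new open, and an FEXCL open is refused while any
	 * other open is outstanding.
	 */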
	mutex_enter(&EMLXS_IOCTL_LOCK);

	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		if (hba->ioctl_flags & EMLXS_OPEN) {
			mutex_exit(&EMLXS_IOCTL_LOCK);
			return (EBUSY);
		}

		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
	}

	hba->ioctl_flags |= EMLXS_OPEN;

	mutex_exit(&EMLXS_IOCTL_LOCK);

	return (0);

} /* emlxs_open() */


/*ARGSUSED*/
static int
emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t	*hba;
	int		ddiinst;

	ddiinst = getminor(dev);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	mutex_enter(&EMLXS_IOCTL_LOCK);

	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (ENODEV);
	}

	hba->ioctl_flags &= ~EMLXS_OPEN;
	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;

	mutex_exit(&EMLXS_IOCTL_LOCK);

	return (0);

} /* emlxs_close() */


/*ARGSUSED*/
static int
emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
    cred_t *cred_p, int32_t *rval_p)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	int		rval = 0;	/* return code */
	int		ddiinst;

	ddiinst = getminor(dev);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	port = &PPORT;

	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "ioctl failed: Driver suspended.");

		return (ENXIO);
	}

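	/* Reject ioctls on an instance that has not been opened */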
	mutex_enter(&EMLXS_IOCTL_LOCK);
	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (ENXIO);
	}
	mutex_exit(&EMLXS_IOCTL_LOCK);

#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif	/* IDLE_TIMER */

	switch (cmd) {
#ifdef DFC_SUPPORT
	case EMLXS_DFC_COMMAND:
		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
		break;
#endif	/* DFC_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "ioctl: Invalid command received. cmd=%x", cmd);
		rval = EINVAL;
	}

done:
	return (rval);

} /* emlxs_ioctl() */



/*
 *
 *	Device Driver Common Routines
 *
 */

/* The caller must hold hba->pm_lock for this call */
static int
emlxs_hba_resume(dev_info_t *dip)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	int		ddiinst;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);

	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
		return (DDI_SUCCESS);
	}

	hba->pm_state &= ~EMLXS_PM_SUSPENDED;

	/* Take the adapter online */
	if (emlxs_power_up(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
		    "Unable to take adapter online.");

		hba->pm_state |= EMLXS_PM_SUSPENDED;

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_hba_resume() */


/* The caller must hold hba->pm_lock for this call */
static int
emlxs_hba_suspend(dev_info_t *dip)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	int		ddiinst;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);

	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		return (DDI_SUCCESS);
	}

	hba->pm_state |= EMLXS_PM_SUSPENDED;

	/* Take the adapter offline */
	if (emlxs_power_down(hba)) {
		hba->pm_state &= ~EMLXS_PM_SUSPENDED;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
		    "Unable to take adapter offline.");

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_hba_suspend() */



static void
emlxs_lock_init(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	int32_t		ddiinst;
	char		buf[64];
	uint32_t	i;

	ddiinst = hba->ddiinst;

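	/*
	 * hba->intr_arg is presumed to be the interrupt-priority cookie;
	 * passing it to mutex_init() lets these locks be acquired from
	 * interrupt context.
	 */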
	/* Initialize the power management */
	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	for (i = 0; i < MAX_RINGS; i++) {
		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
		    ddiinst, i);
		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
		    (void *)hba->intr_arg);

		(void) sprintf(buf, "%s%d_fctab%d_lock mutex", DRIVER_NAME,
		    ddiinst, i);
		mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER,
		    (void *)hba->intr_arg);
	}

	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

#ifdef DUMP_SUPPORT
	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);
#endif /* DUMP_SUPPORT */

	/* Create per port locks */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);

		if (i == 0) {
			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
			    ddiinst);
			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);

			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
			    ddiinst);
			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);

			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
			    ddiinst);
			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);
		} else {
			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
			    DRIVER_NAME, ddiinst, port->vpi);
			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);

			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
			    ddiinst, port->vpi);
			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);

			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
			    DRIVER_NAME, ddiinst, port->vpi);
			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);
		}
	}

	return;

} /* emlxs_lock_init() */



static void
emlxs_lock_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	uint32_t	i;

	mutex_destroy(&EMLXS_TIMER_LOCK);
	cv_destroy(&hba->timer_lock_cv);

	mutex_destroy(&EMLXS_PORT_LOCK);

	cv_destroy(&EMLXS_MBOX_CV);
	cv_destroy(&EMLXS_LINKUP_CV);

	mutex_destroy(&EMLXS_LINKUP_LOCK);
	mutex_destroy(&EMLXS_MBOX_LOCK);

	mutex_destroy(&EMLXS_RINGTX_LOCK);

	for (i = 0; i < MAX_RINGS; i++) {
		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
		mutex_destroy(&EMLXS_FCTAB_LOCK(i));
	}

	mutex_destroy(&EMLXS_MEMGET_LOCK);
	mutex_destroy(&EMLXS_MEMPUT_LOCK);
	mutex_destroy(&EMLXS_IOCTL_LOCK);
	mutex_destroy(&hba->pm_lock);

#ifdef DUMP_SUPPORT
	mutex_destroy(&EMLXS_DUMP_LOCK);
#endif /* DUMP_SUPPORT */

	/* Destroy per port locks */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);
		rw_destroy(&port->node_rwlock);
		mutex_destroy(&EMLXS_PKT_LOCK);
		cv_destroy(&EMLXS_PKT_CV);
		mutex_destroy(&EMLXS_UB_LOCK);
	}

	return;

} /* emlxs_lock_destroy() */