changeset 14029:11aad50aea32

3701 Chelsio Terminator 4 NIC driver for illumos
Reviewed by: Robert Mustacchi <rm@joyent.com>
Approved by: Dan McDonald <danmcd@nexenta.com>
author Gireesh Nagabhushana <gireesh@chelsio.com>
date Thu, 23 May 2013 09:51:05 -0400
parents bcbb822da4dd
children 99b3fd6e18da
files usr/src/pkg/manifests/driver-network-cxgbe.mf usr/src/uts/common/Makefile.files usr/src/uts/common/Makefile.rules usr/src/uts/common/io/cxgbe/common/common.c usr/src/uts/common/io/cxgbe/common/common.h usr/src/uts/common/io/cxgbe/common/t4_hw.c usr/src/uts/common/io/cxgbe/common/t4_hw.h usr/src/uts/common/io/cxgbe/common/t4_msg.h usr/src/uts/common/io/cxgbe/common/t4_regs.h usr/src/uts/common/io/cxgbe/common/t4_regs_values.h usr/src/uts/common/io/cxgbe/common/t4_tcb.h usr/src/uts/common/io/cxgbe/cxgbe/cxgbe.c usr/src/uts/common/io/cxgbe/firmware/t4_cfg.c usr/src/uts/common/io/cxgbe/firmware/t4_cfg.h usr/src/uts/common/io/cxgbe/firmware/t4_fw.c usr/src/uts/common/io/cxgbe/firmware/t4_fw.h usr/src/uts/common/io/cxgbe/firmware/t4fw_interface.h usr/src/uts/common/io/cxgbe/shared/shared.c usr/src/uts/common/io/cxgbe/shared/shared.h usr/src/uts/common/io/cxgbe/t4nex/adapter.c usr/src/uts/common/io/cxgbe/t4nex/adapter.h usr/src/uts/common/io/cxgbe/t4nex/offload.h usr/src/uts/common/io/cxgbe/t4nex/osdep.c usr/src/uts/common/io/cxgbe/t4nex/osdep.h usr/src/uts/common/io/cxgbe/t4nex/t4_ioctl.c usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.c usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.h usr/src/uts/common/io/cxgbe/t4nex/t4_mac.c usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c usr/src/uts/common/io/cxgbe/t4nex/t4_sge.c usr/src/uts/common/io/cxgbe/t4nex/t4nex.h usr/src/uts/common/io/cxgbe/t4nex/version.h usr/src/uts/intel/Makefile.intel.shared usr/src/uts/intel/cxgbe/Makefile usr/src/uts/intel/cxgbe/cxgbe/Makefile usr/src/uts/intel/cxgbe/t4nex/Makefile
diffstat 36 files changed, 87295 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/pkg/manifests/driver-network-cxgbe.mf	Thu May 23 09:51:05 2013 -0400
@@ -0,0 +1,50 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source.  A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2013 by Chelsio Communications, Inc. All rights reserved.
+#
+
+<include global_zone_only_component>
+set name=pkg.fmri value=pkg:/driver/network/cxgbe@$(PKGVERS)
+set name=pkg.description \
+    value="Chelsio Terminator 4 10 Gigabit Ethernet Adapter Driver"
+set name=pkg.summary value="Chelsio Terminator 4 10GE NIC Driver"
+set name=info.classification \
+    value=org.opensolaris.category.2008:Drivers/Networking
+set name=variant.arch value=$(ARCH)
+dir path=kernel group=sys
+dir path=kernel/drv group=sys
+dir path=kernel/drv/$(ARCH64) group=sys
+driver name=cxgbe alias=cxgbe clone_perms="cxgbe 0666 root sys" \
+    perms="* 0666 root sys"
+driver name=t4nex clone_perms="t4nex 0666 root sys" \
+    devlink=type=ddi_ctl:devctl;minor1=t4nex\tt4nex\M2 \
+    perms="* 0666 root sys" \
+    alias=pciex1425,4400 \
+    alias=pciex1425,4401 \
+    alias=pciex1425,4402 \
+    alias=pciex1425,4403 \
+    alias=pciex1425,4404 \
+    alias=pciex1425,4405 \
+    alias=pciex1425,4406 \
+    alias=pciex1425,4407 \
+    alias=pciex1425,4408 \
+    alias=pciex1425,4409 \
+    alias=pciex1425,440a \
+    alias=pciex1425,440d \
+    alias=pciex1425,440e
+file path=kernel/drv/$(ARCH64)/cxgbe group=sys
+file path=kernel/drv/$(ARCH64)/t4nex group=sys
+$(i386_ONLY)file path=kernel/drv/cxgbe group=sys
+$(i386_ONLY)file path=kernel/drv/t4nex group=sys
+license cr_Sun license=cr_Sun
+license lic_CDDL license=lic_CDDL
--- a/usr/src/uts/common/Makefile.files	Tue May 21 15:31:47 2013 -0800
+++ b/usr/src/uts/common/Makefile.files	Thu May 23 09:51:05 2013 -0400
@@ -1880,6 +1880,19 @@
 		vsc7321.o vsc7326.o xpak.o
 
 #
+#	Chelsio Terminator 4 10G NIC nexus driver module
+#
+CXGBE_FW_OBJS  =	t4_fw.o t4_cfg.o
+CXGBE_COM_OBJS =	t4_hw.o common.o
+CXGBE_NEX_OBJS =	t4_nexus.o t4_sge.o t4_mac.o t4_ioctl.o shared.o \
+			t4_l2t.o adapter.o osdep.o
+
+#
+#	Chelsio Terminator 4 10G NIC driver module
+#
+CXGBE_OBJS =	cxgbe.o
+
+#
 #	PCI strings file
 #
 PCI_STRING_OBJS = pci_strings.o
--- a/usr/src/uts/common/Makefile.rules	Tue May 21 15:31:47 2013 -0800
+++ b/usr/src/uts/common/Makefile.rules	Thu May 23 09:51:05 2013 -0400
@@ -1292,6 +1292,26 @@
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
 
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/cxgbe/common/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/cxgbe/shared/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/cxgbe/firmware/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/cxgbe/t4nex/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/cxgbe/cxgbe/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
 $(OBJS_DIR)/%.o:		$(UTSBASE)/common/io/ixgb/%.c
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
@@ -2497,6 +2517,21 @@
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/chxge/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/cxgbe/common/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/cxgbe/shared/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/cxgbe/firmware/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/cxgbe/t4nex/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/cxgbe/cxgbe/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/common/io/ixgb/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/cxgbe/common/common.c	Thu May 23 09:51:05 2013 -0400
@@ -0,0 +1,98 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * This file is part of the Chelsio T4 Ethernet driver.
+ *
+ * Copyright (C) 2005-2013 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#include "common.h"
+
+int
+is_offload(const struct adapter *adap)
+{
+	return (adap->params.offload);
+}
+
+unsigned int
+core_ticks_per_usec(const struct adapter *adap)
+{
+	return (adap->params.vpd.cclk / 1000);
+}
+
+int
+t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl)
+{
+	return (t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true));
+}
+
+unsigned int
+us_to_core_ticks(const struct adapter *adap, unsigned int us)
+{
+	return ((us * adap->params.vpd.cclk) / 1000);
+}
+
+unsigned int
+core_ticks_to_us(const struct adapter *adapter, unsigned int ticks)
+{
+	/* add Core Clock / 2 to round ticks to nearest us */
+	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+	    adapter->params.vpd.cclk);
+}
+
+unsigned int
+dack_ticks_to_usec(const struct adapter *adap, unsigned int ticks)
+{
+	return ((ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap));
+}
+
+int
+is_bypass(const adapter_t *adap)
+{
+	return (adap->params.bypass);
+}
+
+int
+is_bypass_device(int device)
+{
+	/* TODO - this should be set based upon device capabilities */
+	switch (device) {
+#ifdef CONFIG_CHELSIO_BYPASS
+	case 0x440b:
+	case 0x440c:
+		return (1);
+#endif
+
+	default:
+		return (0);
+	}
+}
+
+int
+t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, int polarity,
+    int attempts, int delay)
+{
+	return (t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+	    delay, NULL));
+}
+
+int
+t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, int size,
+    void *rpl)
+{
+	return (t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false));
+}
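
A worked example of the conversion helpers above — a minimal sketch, not part of the changeset, assuming a hypothetical 250 MHz core clock (params.vpd.cclk holds the clock in kHz, so cclk = 250000):

    static void
    example_tick_math(struct adapter *adap)
    {
        unsigned int tpu, ticks, us;

        adap->params.vpd.cclk = 250000;      /* assumed: 250 MHz */
        tpu = core_ticks_per_usec(adap);     /* 250000 / 1000 = 250 */
        ticks = us_to_core_ticks(adap, 100); /* (100 * 250000) / 1000 = 25000 */
        us = core_ticks_to_us(adap, 25125);  /* (25125 * 1000 + 125000) / 250000
                                              * = 101, i.e. 100.5 us rounded to
                                              * the nearest microsecond */
    }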
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/cxgbe/common/common.h	Thu May 23 09:51:05 2013 -0400
@@ -0,0 +1,512 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * This file is part of the Chelsio T4 Ethernet driver.
+ *
+ * Copyright (C) 2005-2013 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#ifndef __CXGBE_COMMON_H
+#define	__CXGBE_COMMON_H
+
+#include "shared.h"
+#include "t4_hw.h"
+
+#define	GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
+		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+		F_CPL_SWITCH | F_SGE | F_ULP_TX)
+
+enum {
+	MAX_NPORTS	= 4,	/* max # of ports */
+	SERNUM_LEN	= 24,	/* Serial # length */
+	EC_LEN		= 16,	/* E/C length */
+	ID_LEN		= 16,	/* ID length */
+	PN_LEN		= 16,	/* Part Number length */
+	MACADDR_LEN	= 12,	/* MAC Address length */
+};
+
+enum { MEM_EDC0, MEM_EDC1, MEM_MC };
+
+enum {
+	MEMWIN0_APERTURE = 2048,
+	MEMWIN0_BASE	 = 0x1b800,
+	MEMWIN1_APERTURE = 32768,
+	MEMWIN1_BASE	 = 0x28000,
+	MEMWIN2_APERTURE = 65536,
+	MEMWIN2_BASE	 = 0x30000,
+};
+
+enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
+
+enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
+
+enum {
+	PAUSE_RX	= 1 << 0,
+	PAUSE_TX	= 1 << 1,
+	PAUSE_AUTONEG	= 1 << 2
+};
+
+struct port_stats {
+	u64 tx_octets;		  /* total # of octets in good frames */
+	u64 tx_frames;		  /* all good frames */
+	u64 tx_bcast_frames;	  /* all broadcast frames */
+	u64 tx_mcast_frames;	  /* all multicast frames */
+	u64 tx_ucast_frames;	  /* all unicast frames */
+	u64 tx_error_frames;	  /* all error frames */
+
+	u64 tx_frames_64;	  /* # of Tx frames in a particular range */
+	u64 tx_frames_65_127;
+	u64 tx_frames_128_255;
+	u64 tx_frames_256_511;
+	u64 tx_frames_512_1023;
+	u64 tx_frames_1024_1518;
+	u64 tx_frames_1519_max;
+
+	u64 tx_drop;		  /* # of dropped Tx frames */
+	u64 tx_pause;		  /* # of transmitted pause frames */
+	u64 tx_ppp0;		  /* # of transmitted PPP prio 0 frames */
+	u64 tx_ppp1;		  /* # of transmitted PPP prio 1 frames */
+	u64 tx_ppp2;		  /* # of transmitted PPP prio 2 frames */
+	u64 tx_ppp3;		  /* # of transmitted PPP prio 3 frames */
+	u64 tx_ppp4;		  /* # of transmitted PPP prio 4 frames */
+	u64 tx_ppp5;		  /* # of transmitted PPP prio 5 frames */
+	u64 tx_ppp6;		  /* # of transmitted PPP prio 6 frames */
+	u64 tx_ppp7;		  /* # of transmitted PPP prio 7 frames */
+
+	u64 rx_octets;		  /* total # of octets in good frames */
+	u64 rx_frames;		  /* all good frames */
+	u64 rx_bcast_frames;	  /* all broadcast frames */
+	u64 rx_mcast_frames;	  /* all multicast frames */
+	u64 rx_ucast_frames;	  /* all unicast frames */
+	u64 rx_too_long;	  /* # of frames exceeding MTU */
+	u64 rx_jabber;		  /* # of jabber frames */
+	u64 rx_fcs_err;		  /* # of received frames with bad FCS */
+	u64 rx_len_err;		  /* # of received frames with length error */
+	u64 rx_symbol_err;	  /* symbol errors */
+	u64 rx_runt;		  /* # of short frames */
+
+	u64 rx_frames_64;	  /* # of Rx frames in a particular range */
+	u64 rx_frames_65_127;
+	u64 rx_frames_128_255;
+	u64 rx_frames_256_511;
+	u64 rx_frames_512_1023;
+	u64 rx_frames_1024_1518;
+	u64 rx_frames_1519_max;
+
+	u64 rx_pause;		  /* # of received pause frames */
+	u64 rx_ppp0;		  /* # of received PPP prio 0 frames */
+	u64 rx_ppp1;		  /* # of received PPP prio 1 frames */
+	u64 rx_ppp2;		  /* # of received PPP prio 2 frames */
+	u64 rx_ppp3;		  /* # of received PPP prio 3 frames */
+	u64 rx_ppp4;		  /* # of received PPP prio 4 frames */
+	u64 rx_ppp5;		  /* # of received PPP prio 5 frames */
+	u64 rx_ppp6;		  /* # of received PPP prio 6 frames */
+	u64 rx_ppp7;		  /* # of received PPP prio 7 frames */
+
+	u64 rx_ovflow0;		  /* drops due to buffer-group 0 overflows */
+	u64 rx_ovflow1;		  /* drops due to buffer-group 1 overflows */
+	u64 rx_ovflow2;		  /* drops due to buffer-group 2 overflows */
+	u64 rx_ovflow3;		  /* drops due to buffer-group 3 overflows */
+	u64 rx_trunc0;		  /* buffer-group 0 truncated packets */
+	u64 rx_trunc1;		  /* buffer-group 1 truncated packets */
+	u64 rx_trunc2;		  /* buffer-group 2 truncated packets */
+	u64 rx_trunc3;		  /* buffer-group 3 truncated packets */
+};
+
+struct lb_port_stats {
+	u64 octets;
+	u64 frames;
+	u64 bcast_frames;
+	u64 mcast_frames;
+	u64 ucast_frames;
+	u64 error_frames;
+
+	u64 frames_64;
+	u64 frames_65_127;
+	u64 frames_128_255;
+	u64 frames_256_511;
+	u64 frames_512_1023;
+	u64 frames_1024_1518;
+	u64 frames_1519_max;
+
+	u64 drop;
+
+	u64 ovflow0;
+	u64 ovflow1;
+	u64 ovflow2;
+	u64 ovflow3;
+	u64 trunc0;
+	u64 trunc1;
+	u64 trunc2;
+	u64 trunc3;
+};
+
+struct tp_tcp_stats {
+	u32 tcpOutRsts;
+	u64 tcpInSegs;
+	u64 tcpOutSegs;
+	u64 tcpRetransSegs;
+};
+
+struct tp_usm_stats {
+	u32 frames;
+	u32 drops;
+	u64 octets;
+};
+
+struct tp_fcoe_stats {
+	u32 framesDDP;
+	u32 framesDrop;
+	u64 octetsDDP;
+};
+
+struct tp_err_stats {
+	u32 macInErrs[4];
+	u32 hdrInErrs[4];
+	u32 tcpInErrs[4];
+	u32 tnlCongDrops[4];
+	u32 ofldChanDrops[4];
+	u32 tnlTxDrops[4];
+	u32 ofldVlanDrops[4];
+	u32 tcp6InErrs[4];
+	u32 ofldNoNeigh;
+	u32 ofldCongDefer;
+};
+
+struct tp_proxy_stats {
+	u32 proxy[4];
+};
+
+struct tp_cpl_stats {
+	u32 req[4];
+	u32 rsp[4];
+	u32 tx_err[4];
+};
+
+struct tp_rdma_stats {
+	u32 rqe_dfr_mod;
+	u32 rqe_dfr_pkt;
+};
+
+struct tp_params {
+	unsigned int ntxchan;		/* # of Tx channels */
+	unsigned int tre;		/* log2 of core clocks per TP tick */
+	unsigned int dack_re;		/* DACK timer resolution */
+	unsigned int la_mask;		/* what events are recorded by TP LA */
+	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */
+};
+
+struct vpd_params {
+	unsigned int cclk;
+	u8 ec[EC_LEN + 1];
+	u8 sn[SERNUM_LEN + 1];
+	u8 id[ID_LEN + 1];
+	u8 pn[PN_LEN + 1];
+	u8 na[MACADDR_LEN + 1];
+};
+
+struct pci_params {
+	unsigned int  vpd_cap_addr;
+	unsigned char speed;
+	unsigned char width;
+};
+
+/*
+ * Firmware device log.
+ */
+struct devlog_params {
+	u32 memtype;			/* which memory (EDC0, EDC1, MC) */
+	u32 start;			/* start of log in firmware memory */
+	u32 size;			/* size of log */
+};
+
+struct adapter_params {
+	struct tp_params  tp;
+	struct vpd_params vpd;
+	struct pci_params pci;
+	struct devlog_params devlog;
+
+	unsigned int sf_size;		/* serial flash size in bytes */
+	unsigned int sf_nsec;		/* # of flash sectors */
+
+	unsigned int fw_vers;
+	unsigned int tp_vers;
+	u8 api_vers[7];
+
+	unsigned short mtus[NMTUS];
+	unsigned short a_wnd[NCCTRL_WIN];
+	unsigned short b_wnd[NCCTRL_WIN];
+
+	unsigned int mc_size;		/* MC memory size */
+	unsigned int nfilters;		/* size of filter region */
+
+	unsigned int cim_la_size;
+
+	unsigned int nports;		/* # of ethernet ports */
+	unsigned int portvec;
+	unsigned int rev;		/* chip revision */
+	unsigned int offload;
+
+	unsigned char bypass;
+
+	unsigned int ofldq_wr_cred;
+};
+
+enum {					/* chip revisions */
+	T4_REV_A  = 0,
+};
+
+struct trace_params {
+	u32 data[TRACE_LEN / 4];
+	u32 mask[TRACE_LEN / 4];
+	unsigned short snap_len;
+	unsigned short min_len;
+	unsigned char skip_ofst;
+	unsigned char skip_len;
+	unsigned char invert;
+	unsigned char port;
+};
+
+struct link_config {
+	unsigned short supported;	/* link capabilities */
+	unsigned short advertising;	/* advertised capabilities */
+	unsigned short requested_speed;	/* speed user has requested */
+	unsigned short speed;		/* actual link speed */
+	unsigned char  requested_fc;	/* flow control user has requested */
+	unsigned char  fc;		/* actual link flow control */
+	unsigned char  autoneg;		/* autonegotiating? */
+	unsigned char  link_ok;		/* link up? */
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+#define	PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define	for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+	u32 val);
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+	void *rpl, bool sleep_ok);
+
+int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+	int size, void *rpl);
+
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+	unsigned int data_reg, u32 *vals, unsigned int nregs,
+	unsigned int start_idx);
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+	unsigned int data_reg, const u32 *vals,
+	unsigned int nregs, unsigned int start_idx);
+
+struct fw_filter_wr;
+
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+void t4_intr_clear(struct adapter *adapter);
+int t4_slow_intr_handler(struct adapter *adapter);
+
+int t4_hash_mac_addr(const u8 *addr);
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+	struct link_config *lc);
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
+int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
+int t4_seeprom_wp(struct adapter *adapter, int enable);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+	unsigned int nwords, u32 *data, int byte_oriented);
+int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
+int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_check_fw_version(struct adapter *adapter);
+int t4_init_hw(struct adapter *adapter, u32 fw_params);
+int t4_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct port_info *p, int mbox, int pf, int vf);
+int t4_reinit_adapter(struct adapter *adap);
+void t4_fatal_err(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+	int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+	int filter_index, int *enabled);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+	int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+	unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+	unsigned int flags, unsigned int defq);
+int t4_read_rss(struct adapter *adapter, u16 *entries);
+void t4_read_rss_key(struct adapter *adapter, u32 *key);
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+	u32 *valp);
+void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
+	u32 val);
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+	u32 *vfl, u32 *vfh);
+void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
+	u32 vfl, u32 vfh);
+u32 t4_read_rss_pf_map(struct adapter *adapter);
+void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap);
+u32 t4_read_rss_pf_mask(struct adapter *adapter);
+void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask);
+int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
+	size_t n);
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
+	size_t n);
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+	unsigned int *valp);
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+	const unsigned int *valp);
+int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
+	unsigned int *valp);
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+	unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+	u64 *parity);
+int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size,
+	__be32 *data);
+int t4_mem_win_read(struct adapter *adap, u32 addr, __be32 *data);
+
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
+void t4_clr_port_stats(struct adapter *adap, int idx);
+
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
+	unsigned int *kbps, unsigned int *ipg);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+	unsigned int mask, unsigned int val);
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+	struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+	struct tp_fcoe_stats *st);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+	const unsigned short *alpha, const unsigned short *beta);
+
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+
+int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps);
+int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg);
+int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
+	unsigned int start, unsigned int n);
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
+int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map);
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
+
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+	const u8 *addr);
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+	u64 mask0, u64 mask1, unsigned int crc, bool enable);
+
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+	enum dev_master master, enum dev_state *state);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_early_init(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	unsigned int vf, unsigned int nparams, const u32 *params, u32 *val);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	unsigned int vf, unsigned int nparams, const u32 *params,
+	const u32 *val);
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+	unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi,
+	unsigned int cmask, unsigned int pmask, unsigned int exactf,
+	unsigned int rcaps, unsigned int wxcaps);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+	unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+	unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+	unsigned int pf, unsigned int vf, unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+	int mtu, int promisc, int all_multi, int bcast, int vlanex,
+	bool sleep_ok);
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+	unsigned int viid, bool free, unsigned int naddr, const u8 **addr,
+	u16 *idx, u64 *hash, bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+	int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+	bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+	bool rx_en, bool tx_en);
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+	unsigned int nblinks);
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	unsigned int mmd, unsigned int reg, unsigned int *valp);
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	unsigned int mmd, unsigned int reg, unsigned int val);
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+	unsigned int pf, unsigned int vf, unsigned int iqid, unsigned int fl0id,
+	unsigned int fl1id);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	unsigned int vf, unsigned int eqid);
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+	enum ctxt_type ctype, u32 *data);
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
+	enum ctxt_type ctype, u32 *data);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+
+/* common.c */
+int is_offload(const struct adapter *adap);
+unsigned int core_ticks_per_usec(const struct adapter *adap);
+int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, int size,
+	void *rpl);
+int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, int size,
+	void *rpl);
+unsigned int us_to_core_ticks(const struct adapter *adap, unsigned int us);
+unsigned int core_ticks_to_us(const struct adapter *adapter,
+	unsigned int ticks);
+unsigned int dack_ticks_to_usec(const struct adapter *adap, unsigned int ticks);
+int is_bypass(const adapter_t *adap);
+int is_bypass_device(int device);
+int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, int polarity,
+	int attempts, int delay);
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+	int polarity, int attempts, int delay, u32 *valp);
+
+#endif /* __CXGBE_COMMON_H */
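
A minimal usage sketch for the for_each_port() iterator declared above; adap2pinfo() and the per-port link_cfg field are assumed to come from adapter.h:

    static void
    example_walk_ports(struct adapter *adap)
    {
        int i;

        for_each_port(adap, i) {
            struct port_info *pi = adap2pinfo(adap, i);

            if (!pi->link_cfg.link_ok)
                continue;   /* skip ports whose link is down */
        }
    }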
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/cxgbe/common/t4_hw.c	Thu May 23 09:51:05 2013 -0400
@@ -0,0 +1,5042 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * This file is part of the Chelsio T4 Ethernet driver.
+ *
+ * Copyright (C) 2003-2013 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+#include <sys/queue.h>
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_regs_values.h"
+#include "t4fw_interface.h"
+#include "t4_fw.h"
+
+/*
+ *	t4_wait_op_done_val - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *	@valp: where to store the value of the register at completion time
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  If @valp is not NULL the value of the register
+ *	at the time it indicated completion is stored there.  Returns 0 if the
+ *	operation completes and -EAGAIN otherwise.
+ */
+int
+t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+    int polarity, int attempts, int d, u32 *valp)
+{
+	int rc = 0;
+
+	/* LINTED: E_CONSTANT_CONDITION */
+	while (1) {
+		u32 val = t4_read_reg(adapter, reg);
+
+		if (!!(val & mask) == polarity) {
+			if (valp != NULL)
+				*valp = val;
+			goto done;
+		}
+		if (--attempts == 0) {
+			rc = -EAGAIN;
+			goto done;
+		}
+		if (d != 0)
+			udelay(d);
+	}
+
+done:
+	return (rc);
+}
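
For a concrete caller of this polling helper, see t4_mc_read() later in this file: it waits for F_START_BIST to clear via t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1), i.e. polarity 0, 10 attempts, 1 us apart.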
+
+/*
+ *	t4_set_reg_field - set a register field to a value
+ *	@adapter: the adapter to program
+ *	@addr: the register address
+ *	@mask: specifies the portion of the register to modify
+ *	@val: the new value for the register field
+ *
+ *	Sets a register field specified by the supplied mask to the
+ *	given value.
+ */
+void
+t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, u32 val)
+{
+	u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+	t4_write_reg(adapter, addr, v | val);
+	(void) t4_read_reg(adapter, addr);	/* flush */
+}
+
+/*
+ *	t4_read_indirect - read indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect address
+ *	@data_reg: register holding the value of the indirect register
+ *	@vals: where the read register values are stored
+ *	@nregs: how many indirect registers to read
+ *	@start_idx: index of first indirect register to read
+ *
+ *	Reads registers that are accessed indirectly through an address/data
+ *	register pair.
+ */
+void
+t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+    unsigned int data_reg, u32 *vals, unsigned int nregs,
+    unsigned int start_idx)
+{
+	while (nregs--) {
+		t4_write_reg(adap, addr_reg, start_idx);
+		*vals++ = t4_read_reg(adap, data_reg);
+		start_idx++;
+	}
+}
+
+/*
+ *	t4_write_indirect - write indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect addresses
+ *	@data_reg: register holding the value for the indirect registers
+ *	@vals: values to write
+ *	@nregs: how many indirect registers to write
+ *	@start_idx: address of first indirect register to write
+ *
+ *	Writes a sequential block of registers that are accessed indirectly
+ *	through an address/data register pair.
+ */
+void
+t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+    unsigned int data_reg, const u32 *vals, unsigned int nregs,
+    unsigned int start_idx)
+{
+	while (nregs--) {
+		t4_write_reg(adap, addr_reg, start_idx++);
+		t4_write_reg(adap, data_reg, *vals++);
+	}
+}
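
A minimal sketch of the address/data pair pattern, assuming the A_TP_PIO_ADDR/A_TP_PIO_DATA register names from t4_regs.h (the pair used by t4_tp_wr_bits_indirect() later in this file):

    static void
    example_indirect_read(struct adapter *adap)
    {
        u32 vals[4];

        /* read four consecutive TP PIO registers starting at index 0 */
        t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, 0);
    }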
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void
+get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, u32 mbox_addr)
+{
+	for (/* */; nflit; nflit--, mbox_addr += 8)
+		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void
+fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+	struct fw_debug_cmd asrt;
+
+	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof (asrt) / 8, mbox_addr);
+	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %x, val1 %x",
+	    asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
+	    ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+}
+
+#define	X_CIM_PF_NOACCESS 0xeeeeeeee
+/*
+ *	t4_wr_mbox_meat - send a command to FW through the given mailbox
+ *	@adap: the adapter
+ *	@mbox: index of the mailbox to use
+ *	@cmd: the command to write
+ *	@size: command length in bytes
+ *	@rpl: where to optionally store the reply
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *
+ *	Sends the given command to FW through the selected mailbox and waits
+ *	for the FW to execute the command.  If @rpl is not %NULL it is used to
+ *	store the FW's reply to the command.  The command and its optional
+ *	reply are of the same length.  Some FW commands like RESET and
+ *	INITIALIZE can take a considerable amount of time to execute.
+ *	@sleep_ok determines whether we may sleep while awaiting the response.
+ *	If sleeping is allowed we use progressive backoff otherwise we spin.
+ *
+ *	The return value is 0 on success or a negative errno on failure.  A
+ *	failure can happen either because we are not able to execute the
+ *	command or FW executes it but signals an error.  In the latter case
+ *	the return value is the error code indicated by FW (negated).
+ */
+int
+t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+    void *rpl, bool sleep_ok)
+{
+	/*
+	 * We delay in small increments at first in an effort to maintain
+	 * responsiveness for simple, fast executing commands but then back
+	 * off to larger delays to a maximum retry delay.
+	 */
+	static const int d[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
+	};
+
+	u32 v;
+	u64 res;
+	int i, ms, delay_idx;
+	const __be64 *p = cmd;
+
+	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
+	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+
+	if ((size & 15) || size > MBOX_LEN)
+		return (-EINVAL);
+
+	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
+	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
+		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
+
+	if (v != X_MBOWNER_PL)
+		return (v ? -EBUSY : -ETIMEDOUT);
+
+	for (i = 0; i < size; i += 8, p++)
+		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+
+	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+	(void) t4_read_reg(adap, ctl_reg);	/* flush write */
+
+	delay_idx = 0;
+	ms = d[0];
+
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+		if (sleep_ok != 0) {
+			ms = d[delay_idx];  /* last element may repeat */
+			if (delay_idx < ARRAY_SIZE(d) - 1)
+				delay_idx++;
+			msleep(ms);
+		} else
+			mdelay(ms);
+
+		v = t4_read_reg(adap, ctl_reg);
+		if (v == X_CIM_PF_NOACCESS)
+			continue;
+		if (G_MBOWNER(v) == X_MBOWNER_PL) {
+			if (!(v & F_MBMSGVALID)) {
+				t4_write_reg(adap, ctl_reg,
+				    V_MBOWNER(X_MBOWNER_NONE));
+				continue;
+			}
+
+			res = t4_read_reg64(adap, data_reg);
+			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
+				fw_asrt(adap, data_reg);
+				res = V_FW_CMD_RETVAL(EIO);
+			} else if (rpl != NULL)
+				get_mbox_rpl(adap, rpl, size / 8, data_reg);
+			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+			return (-G_FW_CMD_RETVAL((int)res));
+		}
+	}
+
+	CH_ERR(adap, "command %x in mailbox %d timed out",
+	    *(const u8 *)cmd, mbox);
+	return (-ETIMEDOUT);
+}
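
A sketch of the caller-side pattern for a firmware command, modeled on t4_fw_reset() (declared in common.h above); the fw_reset_cmd struct and the V_FW_CMD_*, F_FW_CMD_* and FW_LEN16 macros are assumed from t4fw_interface.h:

    static int
    example_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
    {
        struct fw_reset_cmd c;

        (void) memset(&c, 0, sizeof (c));
        c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
            F_FW_CMD_WRITE);
        c.retval_len16 = htonl(FW_LEN16(c));
        c.val = htonl(reset);

        /* sleep_ok variant: backs off progressively while polling */
        return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
    }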
+
+/*
+ *	t4_mc_read - read from MC through backdoor accesses
+ *	@adap: the adapter
+ *	@addr: address of first byte requested
+ *	@data: 64 bytes of data containing the requested address
+ *	@ecc: where to store the corresponding 64-bit ECC word
+ *
+ *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ *	that covers the requested address @addr.  If @ecc is not %NULL it
+ *	is assigned the 64-bit ECC word for the read data.
+ */
+int
+t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+{
+	int i;
+
+	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
+		return (-EBUSY);
+	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
+	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
+	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
+	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
+	    V_BIST_CMD_GAP(1));
+	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
+	if (i != 0)
+		return (i);
+
+#define	MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
+	if (ecc != NULL)
+		*ecc = t4_read_reg64(adap, MC_DATA(16));
+#undef MC_DATA
+	return (0);
+}
+
+/*
+ *	t4_edc_read - read from EDC through backdoor accesses
+ *	@adap: the adapter
+ *	@idx: which EDC to access
+ *	@addr: address of first byte requested
+ *	@data: 64 bytes of data containing the requested address
+ *	@ecc: where to store the corresponding 64-bit ECC word
+ *
+ *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ *	that covers the requested address @addr.  If @ecc is not %NULL it
+ *	is assigned the 64-bit ECC word for the read data.
+ */
+int
+t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+{
+	int i;
+
+	idx *= EDC_STRIDE;
+	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
+		return (-EBUSY);
+	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
+	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
+	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
+	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
+	    V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
+	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
+	if (i != 0)
+		return (i);
+
+#define	EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
+	if (ecc != NULL)
+		*ecc = t4_read_reg64(adap, EDC_DATA(16));
+#undef EDC_DATA
+	return (0);
+}
+
+/*
+ *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
+ *	@adap: the adapter
+ *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ *	@addr: address within indicated memory type
+ *	@len: amount of memory to read
+ *	@buf: host memory buffer
+ *
+ *	Reads an [almost] arbitrary memory region in the firmware: the
+ *	firmware memory address, length and host buffer must be aligned on
+ *	32-bit boudaries.  The memory is returned as a raw byte sequence from
+ *	32-bit boundaries.  The memory is returned as a raw byte sequence from
+ *	the firmware's memory.  If this memory contains data structures which
+ *	contain multi-byte integers, it's the caller's responsibility to
+ */
+int
+t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf)
+{
+	u32 pos, start, end, offset;
+	int ret;
+
+	/*
+	 * Argument sanity checks ...
+	 */
+	if ((addr & 0x3) || (len & 0x3))
+		return (-EINVAL);
+
+	/*
+	 * The underlying EDC/MC read routines read 64 bytes at a time so we
+	 * need to round down the start and round up the end.  We'll start
+	 * copying out of the first line at (addr - start) a word at a time.
+	 */
+	start = addr & ~(64-1);
+	end = (addr + len + 64-1) & ~(64-1);
+	offset = (addr - start)/sizeof (__be32);
+
+	for (pos = start; pos < end; pos += 64, offset = 0) {
+		__be32 data[16];
+
+		/*
+		 * Read the chip's memory block and bail if there's an error.
+		 */
+		if (mtype == MEM_MC)
+			ret = t4_mc_read(adap, pos, data, NULL);
+		else
+			ret = t4_edc_read(adap, mtype, pos, data, NULL);
+		if (ret != 0)
+			return (ret);
+
+		/*
+		 * Copy the data into the caller's memory buffer.
+		 */
+		while (offset < 16 && len > 0) {
+			*buf++ = data[offset++];
+			len -= sizeof (__be32);
+		}
+	}
+
+	return (0);
+}
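
A worked example of the rounding above: for addr = 0x104 and len = 8, start = 0x100, end = 0x140 and offset = 1, so a single 64-byte line is read and words 1 and 2 of data[] are copied out to the caller.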
+
+/*
+ *      t4_mem_win_rw - read/write memory through PCIE memory window
+ *      @adap: the adapter
+ *      @addr: address of first byte requested
+ *      @data: MEMWIN0_APERTURE bytes of data containing the requested address
+ *      @dir: direction of transfer 1 => read, 0 => write
+ *
+ *      Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
+ *      MEMWIN0_APERTURE-byte-aligned address that covers the requested
+ *      address @addr.
+ */
+static int
+t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
+{
+	int i;
+
+	/*
+	 * Set up offset into PCIE memory window.  Address must be a
+	 * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
+	 * ensure that changes propagate before we attempt to use the new
+	 * values.)
+	 */
+	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
+	    addr & ~(MEMWIN0_APERTURE - 1));
+	(void) t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+	    0));
+
+	/* Collect data 4 bytes at a time, up to MEMWIN0_APERTURE */
+	for (i = 0; i < MEMWIN0_APERTURE; i = i + 0x4) {
+		if (dir != 0)
+			*data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
+		else
+			t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
+	}
+
+	return (0);
+}
+
+int
+t4_mem_win_read(struct adapter *adap, u32 addr, __be32 *data)
+{
+	return (t4_mem_win_rw(adap, addr, data, 1));
+}
+
+/*
+ * Partial EEPROM Vital Product Data structure.  Includes only the ID and
+ * VPD-R header.
+ */
+struct t4_vpd_hdr {
+	u8  id_tag;
+	u8  id_len[2];
+	u8  id_data[ID_LEN];
+	u8  vpdr_tag;
+	u8  vpdr_len[2];
+};
+
+/*
+ * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
+ */
+#define	EEPROM_MAX_RD_POLL	40
+#define	EEPROM_MAX_WR_POLL	6
+#define	EEPROM_STAT_ADDR	0x7bfc
+#define	VPD_BASE		0x400
+#define	VPD_BASE_OLD		0
+#define	VPD_LEN			512
+#define	VPD_INFO_FLD_HDR_SIZE	3
+
+/*
+ *	t4_seeprom_read - read a serial EEPROM location
+ *	@adapter: adapter to read
+ *	@addr: EEPROM virtual address
+ *	@data: where to store the read data
+ *
+ *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ *	VPD capability.  Note that this function must be called with a virtual
+ *	address.
+ */
+int
+t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+{
+	u16 val;
+	int attempts = EEPROM_MAX_RD_POLL;
+	unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+	if (addr >= EEPROMVSIZE || (addr & 3))
+		return (-EINVAL);
+
+	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
+	do {
+		udelay(10);
+		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+	if (!(val & PCI_VPD_ADDR_F)) {
+		CH_ERR(adapter, "reading EEPROM address 0x%x failed", addr);
+		return (-EIO);
+	}
+	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
+	*data = le32_to_cpu(*data);
+	return (0);
+}
+
+/*
+ *	t4_seeprom_write - write a serial EEPROM location
+ *	@adapter: adapter to write
+ *	@addr: virtual EEPROM address
+ *	@data: value to write
+ *
+ *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
+ *	VPD capability.  Note that this function must be called with a virtual
+ *	address.
+ */
+int
+t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+{
+	u16 val;
+	int attempts = EEPROM_MAX_WR_POLL;
+	unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+	if (addr >= EEPROMVSIZE || (addr & 3))
+		return (-EINVAL);
+
+	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
+	    cpu_to_le32(data));
+	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
+	    (u16)addr | PCI_VPD_ADDR_F);
+	do {
+		msleep(1);
+		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+	} while ((val & PCI_VPD_ADDR_F) && --attempts);
+
+	if (val & PCI_VPD_ADDR_F) {
+		CH_ERR(adapter, "write to EEPROM address %x failed", addr);
+		return (-EIO);
+	}
+	return (0);
+}
+
+/*
+ *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
+ *	@phys_addr: the physical EEPROM address
+ *	@fn: the PCI function number
+ *	@sz: size of function-specific area
+ *
+ *	Translate a physical EEPROM address to virtual.  The first 1K is
+ *	accessed through virtual addresses starting at 31K, the rest is
+ *	accessed through virtual addresses starting at 0.
+ *
+ *	The mapping is as follows:
+ *	[0..1K) -> [31K..32K)
+ *	[1K..1K+A) -> [ES-A..ES)
+ *	[1K+A..ES) -> [0..ES-A-1K)
+ *
+ *	where A = @fn * @sz, and ES = EEPROM size.
+ */
+int
+t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+	fn *= sz;
+	if (phys_addr < 1024)
+		return (phys_addr + (31 << 10));
+	if (phys_addr < 1024 + fn)
+		return (EEPROMSIZE - fn + phys_addr - 1024);
+	if (phys_addr < EEPROMSIZE)
+		return (phys_addr - 1024 - fn);
+	return (-EINVAL);
+}
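
A worked example of the mapping, assuming a hypothetical 16K EEPROM with fn = 1 and sz = 1K (so A = 1K and ES = 16K): physical 0x0 maps to virtual 31K, physical 0x400 (the start of this function's private area) maps to 15K, and physical 0x800 maps to virtual 0.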
+
+/*
+ *	t4_seeprom_wp - enable/disable EEPROM write protection
+ *	@adapter: the adapter
+ *	@enable: whether to enable or disable write protection
+ *
+ *	Enables or disables write protection on the serial EEPROM.
+ */
+int
+t4_seeprom_wp(struct adapter *adapter, int enable)
+{
+	return (t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0));
+}
+
+/*
+ *	get_vpd_keyword_val - Locates an information field keyword in the VPD
+ *	@v: Pointer to buffered vpd data structure
+ *	@kw: The keyword to search for
+ *
+ *	Returns the offset of the keyword's value field within the VPD,
+ *	or -ENOENT if the keyword is not found.
+ */
+static int
+get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
+{
+	int i;
+	unsigned int offset, len;
+	const u8 *buf = &v->id_tag;
+	const u8 *vpdr_len = &v->vpdr_tag;
+	offset = sizeof (struct t4_vpd_hdr);
+	len =  (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
+
+	if (len + sizeof (struct t4_vpd_hdr) > VPD_LEN) {
+		return (-ENOENT);
+	}
+
+	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len; ) {
+
+		if (memcmp(buf + i, kw, 2) == 0) {
+			i += VPD_INFO_FLD_HDR_SIZE;
+			return (i);
+		}
+
+		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
+	}
+
+	return (-ENOENT);
+}
+
+/*
+ *	get_vpd_params - read VPD parameters from VPD EEPROM
+ *	@adapter: adapter to read
+ *	@p: where to store the parameters
+ *
+ *	Reads card parameters stored in VPD EEPROM.
+ */
+static int
+get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+	int i, ret, addr;
+	int ec, sn, pn, na;
+	u8 vpd[VPD_LEN], csum;
+	const struct t4_vpd_hdr *v;
+
+	/*
+	 * Card information normally starts at VPD_BASE but early cards had
+	 * it at 0.
+	 */
+	/* LINTED: E_BAD_PTR_CAST_ALIGN */
+	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
+	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+	for (i = 0; i < sizeof (vpd); i += 4) {
+		/* LINTED: E_BAD_PTR_CAST_ALIGN */
+		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
+		if (ret != 0)
+			return (ret);
+	}
+	v = (const struct t4_vpd_hdr *)vpd;
+
+#define	FIND_VPD_KW(var, name) do { \
+	var = get_vpd_keyword_val(v, name); \
+	if (var < 0) { \
+		CH_ERR(adapter, "missing VPD keyword " name); \
+		return (-EINVAL); \
+	} \
+} while (0)
+
+	/* LINTED: E_CONSTANT_CONDITION */
+	FIND_VPD_KW(i, "RV");
+	for (csum = 0; i >= 0; i--)
+		csum += vpd[i];
+
+	if (csum != 0) {
+		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u", csum);
+		return (-EINVAL);
+	}
+	/* LINTED: E_CONSTANT_CONDITION */
+	FIND_VPD_KW(ec, "EC");
+	/* LINTED: E_CONSTANT_CONDITION */
+	FIND_VPD_KW(sn, "SN");
+	/* LINTED: E_CONSTANT_CONDITION */
+	FIND_VPD_KW(pn, "PN");
+	/* LINTED: E_CONSTANT_CONDITION */
+	FIND_VPD_KW(na, "NA");
+#undef FIND_VPD_KW
+
+	(void) memcpy(p->id, v->id_data, ID_LEN);
+	(void) strstrip(p->id);
+	(void) memcpy(p->ec, vpd + ec, EC_LEN);
+	(void) strstrip(p->ec);
+	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
+	(void) memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+	(void) strstrip(p->sn);
+	(void) memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+	(void) strstrip(p->pn);
+	(void) memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+	(void) strstrip(p->na);
+
+	return (0);
+}
+
+/* serial flash and firmware constants and flash config file constants */
+enum {
+	SF_ATTEMPTS = 10,		/* max retries for SF operations */
+
+	/* flash command opcodes */
+	SF_PROG_PAGE    = 2,		/* program page */
+	SF_WR_DISABLE   = 4,		/* disable writes */
+	SF_RD_STATUS    = 5,		/* read status register */
+	SF_WR_ENABLE    = 6,		/* enable writes */
+	SF_RD_DATA_FAST = 0xb,		/* read flash */
+	SF_RD_ID	= 0x9f,		/* read ID */
+	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
+};
+
+/*
+ *	sf1_read - read data from the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to read
+ *	@cont: whether another operation will be chained
+ *	@lock: whether to lock SF for PL access only
+ *	@valp: where to store the read data
+ *
+ *	Reads up to 4 bytes of data from the serial flash.  The location of
+ *	the read needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int
+sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock,
+    u32 *valp)
+{
+	int ret;
+
+	if (!byte_cnt || byte_cnt > 4)
+		return (-EINVAL);
+	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+		return (-EBUSY);
+	t4_write_reg(adapter, A_SF_OP,
+	    V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+	if (!ret)
+		*valp = t4_read_reg(adapter, A_SF_DATA);
+	return (ret);
+}
+
+/*
+ *	sf1_write - write data to the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to write
+ *	@cont: whether another operation will be chained
+ *	@lock: whether to lock SF for PL access only
+ *	@val: value to write
+ *
+ *	Writes up to 4 bytes of data to the serial flash.  The location of
+ *	the write needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int
+sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock,
+    u32 val)
+{
+	if (!byte_cnt || byte_cnt > 4)
+		return (-EINVAL);
+	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+		return (-EBUSY);
+	t4_write_reg(adapter, A_SF_DATA, val);
+	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
+	    V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+	return (t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5));
+}
+
+/*
+ *	flash_wait_op - wait for a flash operation to complete
+ *	@adapter: the adapter
+ *	@attempts: max number of polls of the status register
+ *	@delay: delay between polls in ms
+ *
+ *	Wait for a flash operation to complete by polling the status register.
+ */
+static int
+flash_wait_op(struct adapter *adapter, int attempts, int d)
+{
+	int ret = 0;
+	u32 status;
+
+	/* LINTED: E_CONSTANT_CONDITION */
+	while (1) {
+		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+			goto done;
+		if (!(status & 1))
+			goto done;
+		if (--attempts == 0) {
+			ret = -EAGAIN;
+			goto done;
+		}
+		if (d != 0)
+			msleep(d);
+	}
+
+done:
+	return (ret);
+}
+
+/*
+ *	t4_read_flash - read words from serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address for the read
+ *	@nwords: how many 32-bit words to read
+ *	@data: where to store the read data
+ *	@byte_oriented: whether to store data as bytes or as words
+ *
+ *	Read the specified number of 32-bit words from the serial flash.
+ *	If @byte_oriented is set the read data is stored as a byte array
+ *	(i.e., big-endian), otherwise as 32-bit words in the platform's
+ *	natural endianness.
+ */
+int
+t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords,
+    u32 *data, int byte_oriented)
+{
+	int ret;
+
+	if (addr + nwords * sizeof (u32) > adapter->params.sf_size ||
+	    (addr & 3))
+		return (-EINVAL);
+
+	addr = swab32(addr) | SF_RD_DATA_FAST;
+
+	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+		return (ret);
+
+	for (/* */; nwords; nwords--, data++) {
+		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+		if (nwords == 1)
+			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
+		if (ret != 0)
+			return (ret);
+		if (byte_oriented != 0)
+			*data = htonl(*data);
+	}
+	return (0);
+}
+
+/*
+ *	t4_write_flash - write up to a page of data to the serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address to write
+ *	@n: length of data to write in bytes
+ *	@data: the data to write
+ *
+ *	Writes up to a page of data (256 bytes) to the serial flash starting
+ *	at the given address.  All the data must be written to the same page.
+ */
+static int
+t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n,
+    const u8 *data)
+{
+	int ret;
+	u32 buf[SF_PAGE_SIZE / 4];
+	unsigned int i, c, left, val, offset = addr & 0xff;
+
+	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+		return (-EINVAL);
+
+	val = swab32(addr) | SF_PROG_PAGE;
+
+	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+		goto unlock;
+
+	for (left = n; left; left -= c) {
+		c = min(left, 4U);
+		for (val = 0, i = 0; i < c; ++i)
+			val = (val << 8) + *data++;
+
+		ret = sf1_write(adapter, c, c != left, 1, val);
+		if (ret != 0)
+			goto unlock;
+	}
+	ret = flash_wait_op(adapter, 8, 1);
+	if (ret != 0)
+		goto unlock;
+
+	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
+
+	/* Read the page to verify the write succeeded */
+	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+	if (ret != 0)
+		return (ret);
+
+	if (memcmp(data - n, (u8 *)buf + offset, n)) {
+		CH_ERR(adapter, "failed to correctly write the flash page "
+		    "at %x", addr);
+		return (-EIO);
+	}
+	return (0);
+
+unlock:
+	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
+	return (ret);
+}
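
The same-page constraint above is why, for instance, a 32-byte write at addr = 0x1f0 would be rejected: offset = 0xf0, and offset + n = 0x110 exceeds SF_PAGE_SIZE (256 bytes), so the routine returns -EINVAL.  Callers such as t4_load_cfg() below avoid this by writing SF_PAGE_SIZE-sized chunks at page-aligned addresses.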
+
+/*
+ *	t4_get_fw_version - read the firmware version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW version from flash.
+ */
+int
+t4_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+	return (t4_read_flash(adapter,
+	    FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1, vers, 0));
+}
+
+/*
+ *	t4_get_tp_version - read the TP microcode version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the TP microcode version from flash.
+ */
+int
+t4_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+	return (t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
+	    tp_microcode_ver), 1, vers, 0));
+}
+
+/*
+ *	t4_check_fw_version - check if the FW is compatible with this driver
+ *	@adapter: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if there's exact match, a negative error if the version could not be
+ *	read or there's a major version mismatch, and a positive value if the
+ *	expected major version is found but there's a minor version mismatch.
+ */
+int
+t4_check_fw_version(struct adapter *adapter)
+{
+	u32 api_vers[2];
+	int ret, major, minor, micro;
+
+	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
+	if (!ret)
+		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
+	if (!ret)
+		ret = t4_read_flash(adapter,
+		    FLASH_FW_START + offsetof(struct fw_hdr, intfver_nic), 2,
+		    api_vers, 1);
+	if (ret != 0)
+		return (ret);
+
+	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
+	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
+	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
+	(void) memcpy(adapter->params.api_vers, api_vers,
+	    sizeof (adapter->params.api_vers));
+
+	if (major != T4FW_VERSION_MAJOR) {	/* major mismatch - fail */
+		CH_ERR(adapter, "card FW has major version %u, driver wants %u",
+		    major, T4FW_VERSION_MAJOR);
+		return (-EINVAL);
+	}
+
+	if (minor == T4FW_VERSION_MINOR && micro == T4FW_VERSION_MICRO)
+		return (0);			/* perfect match */
+
+	/* Minor/micro version mismatch.  Report it but often it's OK. */
+	return (1);
+}
+
+/*
+ *	t4_flash_erase_sectors - erase a range of flash sectors
+ *	@adapter: the adapter
+ *	@start: the first sector to erase
+ *	@end: the last sector to erase
+ *
+ *	Erases the sectors in the given inclusive range.
+ */
+static int
+t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+	int ret = 0;
+
+	while (start <= end) {
+		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+		    (ret = sf1_write(adapter, 4, 0, 1,
+		    SF_ERASE_SECTOR | (start << 8))) != 0 ||
+		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
+			CH_ERR(adapter, "erase of flash sector %d failed, "
+			    "error %d", start, ret);
+			break;
+		}
+		start++;
+	}
+	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
+	return (ret);
+}
+
+/*
+ *	t4_flash_cfg_addr - return the address of the flash configuration file
+ *	@adapter: the adapter
+ *
+ *	Return the address within the flash where the Firmware Configuration
+ *	File is stored.
+ */
+unsigned int
+t4_flash_cfg_addr(struct adapter *adapter)
+{
+	if (adapter->params.sf_size == 0x100000)
+		return (FLASH_FPGA_CFG_START);
+	else
+		return (FLASH_CFG_START);
+}
+
+/*
+ *	t4_load_cfg - download config file
+ *	@adap: the adapter
+ *	@cfg_data: the cfg text file to write
+ *	@size: text file size
+ *
+ *	Write the supplied config text file to the card's serial flash.
+ */
+int
+t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+	int ret, i, n;
+	unsigned int addr;
+	unsigned int flash_cfg_start_sec;
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+	addr = t4_flash_cfg_addr(adap);
+	flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+	if (!size) {
+		CH_ERR(adap, "cfg file has no data");
+		return (-EINVAL);
+	}
+
+	if (size > FLASH_CFG_MAX_SIZE) {
+		CH_ERR(adap, "cfg file too large, max is %u bytes",
+		    FLASH_CFG_MAX_SIZE);
+		return (-EFBIG);
+	}
+
+	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
+	    sf_sec_size);
+	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+	    flash_cfg_start_sec + i - 1);
+	if (ret != 0)
+		goto out;
+
+	/* this will write to the flash up to SF_PAGE_SIZE at a time */
+	for (i = 0; i < size; i += SF_PAGE_SIZE) {
+		if ((size - i) < SF_PAGE_SIZE)
+			n = size - i;
+		else
+			n = SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, n, cfg_data);
+		if (ret != 0)
+			goto out;
+
+		addr += SF_PAGE_SIZE;
+		cfg_data += SF_PAGE_SIZE;
+	}
+
+out:
+	if (ret != 0)
+		CH_ERR(adap, "config file download failed %d", ret);
+	return (ret);
+}
+
+/*
+ *	t4_load_fw - download firmware
+ *	@adap: the adapter
+ *	@fw_data: the firmware image to write
+ *	@size: image size
+ *
+ *	Write the supplied firmware image to the card's serial flash.
+ */
+int
+t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+{
+	u32 csum;
+	int ret, addr;
+	unsigned int i;
+	u8 first_page[SF_PAGE_SIZE];
+	/* LINTED: E_BAD_PTR_CAST_ALIGN */
+	const u32 *p = (const u32 *)fw_data;
+	/* LINTED: E_BAD_PTR_CAST_ALIGN */
+	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+	if (!size) {
+		CH_ERR(adap, "FW image has no data");
+		return (-EINVAL);
+	}
+	if (size & 511) {
+		CH_ERR(adap, "FW image size not multiple of 512 bytes");
+		return (-EINVAL);
+	}
+	if (ntohs(hdr->len512) * 512 != size) {
+		CH_ERR(adap, "FW image size differs from size in FW header");
+		return (-EINVAL);
+	}
+	if (size > FLASH_FW_MAX_SIZE) {
+		CH_ERR(adap, "FW image too large, max is %u bytes\n",
+		    FLASH_FW_MAX_SIZE);
+		return (-EFBIG);
+	}
+
+	for (csum = 0, i = 0; i < size / sizeof (csum); i++)
+		csum += ntohl(p[i]);
+
+	if (csum != 0xffffffff) {
+		CH_ERR(adap, "corrupted firmware image, checksum %x",
+		    csum);
+		return (-EINVAL);
+	}
+
+	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
+	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
+	    FLASH_FW_START_SEC + i - 1);
+	if (ret != 0)
+		goto out;
+
+	/*
+	 * We write the correct version at the end so the driver can see a bad
+	 * version if the FW write fails.  Start by writing a copy of the
+	 * first page with a bad version.
+	 */
+	(void) memcpy(first_page, fw_data, SF_PAGE_SIZE);
+	/* LINTED: E_BAD_PTR_CAST_ALIGN */
+	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page);
+	if (ret != 0)
+		goto out;
+
+	addr = FLASH_FW_START;
+	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+		addr += SF_PAGE_SIZE;
+		fw_data += SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+		if (ret != 0)
+			goto out;
+	}
+
+	ret = t4_write_flash(adap,
+	    FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
+	    sizeof (hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+	if (ret != 0)
+		CH_ERR(adap, "firmware download failed, error %d", ret);
+	return (ret);
+}
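+
+/*
+ * Note on the checksum verified in t4_load_fw(): the firmware image is
+ * constructed so that the sum of all of its big-endian 32-bit words,
+ * modulo 2^32, is 0xffffffff.  An image builder achieves this by storing
+ *
+ *	0xffffffff - (sum of all other words)
+ *
+ * in a word reserved for that purpose, which is why a single summation
+ * loop suffices to validate the whole image here.
+ */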
+
+/*
+ *	t4_read_cimq_cfg - read CIM queue configuration
+ *	@adap: the adapter
+ *	@base: holds the queue base addresses in bytes
+ *	@size: holds the queue sizes in bytes
+ *	@thres: holds the queue full thresholds in bytes
+ *
+ *	Returns the current configuration of the CIM queues, starting with
+ *	the IBQs, then the OBQs.
+ */
+void
+t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+	unsigned int i, v;
+
+	for (i = 0; i < CIM_NUM_IBQ; i++) {
+		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
+		    V_QUENUMSELECT(i));
+		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
+		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
+		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
+	}
+	for (i = 0; i < CIM_NUM_OBQ; i++) {
+		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
+		    V_QUENUMSELECT(i));
+		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
+		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
+	}
+}
+
+/*
+ *	t4_read_cim_ibq - read the contents of a CIM inbound queue
+ *	@adap: the adapter
+ *	@qid: the queue index
+ *	@data: where to store the queue contents
+ *	@n: capacity of @data in 32-bit words
+ *
+ *	Reads the contents of the selected CIM queue starting at address 0 up
+ *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *	error and the number of 32-bit words actually read on success.
+ */
+int
+t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+	int i, err;
+	unsigned int addr;
+	const unsigned int nwords = CIM_IBQ_SIZE * 4;
+
+	if (qid > 5 || (n & 3))
+		return (-EINVAL);
+
+	addr = qid * nwords;
+	if (n > nwords)
+		n = nwords;
+
+	for (i = 0; i < n; i++, addr++) {
+		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
+		    F_IBQDBGEN);
+		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
+		    2, 1);
+		if (err != 0)
+			return (err);
+		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+	}
+	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
+	return (i);
+}
+
+/*
+ *	t4_read_cim_obq - read the contents of a CIM outbound queue
+ *	@adap: the adapter
+ *	@qid: the queue index
+ *	@data: where to store the queue contents
+ *	@n: capacity of @data in 32-bit words
+ *
+ *	Reads the contents of the selected CIM queue starting at address 0 up
+ *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *	error and the number of 32-bit words actually read on success.
+ */
+int
+t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+	int i, err;
+	unsigned int addr, v, nwords;
+
+	if (qid > 5 || (n & 3))
+		return (-EINVAL);
+
+	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
+	    V_QUENUMSELECT(qid));
+	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+
+	addr = G_CIMQBASE(v) * 64;    /* 256-byte units -> 4-byte units */
+	nwords = G_CIMQSIZE(v) * 64;  /* same conversion */
+	if (n > nwords)
+		n = nwords;
+
+	for (i = 0; i < n; i++, addr++) {
+		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
+		    F_OBQDBGEN);
+		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
+		    2, 1);
+		if (err != 0)
+			return (err);
+		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+	}
+	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
+	return (i);
+}
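+
+/*
+ * Usage note: dumping a CIM inbound queue for debugging might look like
+ * this (sketch; the buffer size mirrors the per-queue capacity used in
+ * t4_read_cim_ibq() above):
+ *
+ *	u32 buf[CIM_IBQ_SIZE * 4];
+ *	int n = t4_read_cim_ibq(adap, 0, buf, ARRAY_SIZE(buf));
+ *
+ * On success "n" is the number of 32-bit words placed in buf; a negative
+ * value is an error.
+ */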
+
+enum {
+	CIM_QCTL_BASE	  = 0,
+	CIM_CTL_BASE	  = 0x2000,
+	CIM_PBT_ADDR_BASE = 0x2800,
+	CIM_PBT_LRF_BASE  = 0x3000,
+	CIM_PBT_DATA_BASE = 0x3800
+};
+
+/*
+ *	t4_cim_read - read a block from CIM internal address space
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM address space
+ *	@n: number of words to read
+ *	@valp: where to store the result
+ *
+ *	Reads a block of 4-byte words from the CIM internal address space.
+ */
+int
+t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+    unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+		return (-EBUSY);
+
+	for (/* */; !ret && n--; addr += 4) {
+		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
+		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+		    0, 5, 2);
+		if (!ret)
+			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
+	}
+	return (ret);
+}
+
+/*
+ *	t4_cim_write - write a block into CIM internal address space
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM address space
+ *	@n: number of words to write
+ *	@valp: set of values to write
+ *
+ *	Writes a block of 4-byte words into the CIM internal address space.
+ */
+int
+t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+    const unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+		return (-EBUSY);
+
+	for (/* */; !ret && n--; addr += 4) {
+		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
+		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
+		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+		    0, 5, 2);
+	}
+	return (ret);
+}
+
+static int
+t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
+{
+	return (t4_cim_write(adap, addr, 1, &val));
+}
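+
+/*
+ * Usage note: t4_cim_read() and t4_cim_write1() combine into the usual
+ * read-modify-write pattern on CIM registers (sketch):
+ *
+ *	unsigned int val;
+ *	if ((rc = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val)) == 0)
+ *		rc = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
+ *		    val | F_UPDBGLAEN);
+ *
+ * t4_cim_read_la() below uses this pattern to freeze and restart the LA.
+ */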
+
+/*
+ *	t4_cim_ctl_read - read a block from CIM control region
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM control region
+ *	@n: number of words to read
+ *	@valp: where to store the result
+ *
+ *	Reads a block of 4-byte words from the CIM control region.
+ */
+int
+t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
+    unsigned int *valp)
+{
+	return (t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp));
+}
+
+/*
+ *	t4_cim_read_la - read CIM LA capture buffer
+ *	@adap: the adapter
+ *	@la_buf: where to store the LA data
+ *	@wrptr: the HW write pointer within the capture buffer
+ *
+ *	Reads the contents of the CIM LA buffer with the most recent entry at
+ *	the end of the returned data and with the entry at @wrptr first.
+ *	We try to leave the LA in the running state we find it in.
+ */
+int
+t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+	int i, ret;
+	unsigned int cfg, val, idx;
+
+	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+	if (ret != 0)
+		return (ret);
+
+	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
+		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
+		if (ret != 0)
+			return (ret);
+	}
+
+	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+	if (ret != 0)
+		goto restart;
+
+	idx = G_UPDBGLAWRPTR(val);
+	if (wrptr != 0)
+		*wrptr = idx;
+
+	for (i = 0; i < adap->params.cim_la_size; i++) {
+		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
+		    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
+		if (ret != 0)
+			break;
+		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+		if (ret != 0)
+			break;
+		if (val & F_UPDBGLARDEN) {
+			ret = -ETIMEDOUT;
+			break;
+		}
+		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
+		if (ret != 0)
+			break;
+		idx = (idx + 1) & M_UPDBGLARDPTR;
+	}
+restart:
+	if (cfg & F_UPDBGLAEN) {
+		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
+		    cfg & ~F_UPDBGLARDEN);
+		if (!ret)
+			ret = r;
+	}
+	return (ret);
+}
+
+void
+t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+    unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr)
+{
+	int i, j;
+	u32 cfg, val, req, rsp;
+
+	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
+	if (cfg & F_LADBGEN)
+		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
+
+	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
+	req = G_POLADBGWRPTR(val);
+	rsp = G_PILADBGWRPTR(val);
+	if (pif_req_wrptr != NULL)
+		*pif_req_wrptr = req;
+	if (pif_rsp_wrptr != NULL)
+		*pif_rsp_wrptr = rsp;
+
+	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
+		for (j = 0; j < 6; j++) {
+			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
+			    V_PILADBGRDPTR(rsp));
+			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
+			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
+			req++;
+			rsp++;
+		}
+		req = (req + 2) & M_POLADBGRDPTR;
+		rsp = (rsp + 2) & M_PILADBGRDPTR;
+	}
+	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
+}
+
+void
+t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
+{
+	u32 cfg;
+	int i, j, idx;
+
+	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
+	if (cfg & F_LADBGEN)
+		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
+
+	for (i = 0; i < CIM_MALA_SIZE; i++) {
+		for (j = 0; j < 5; j++) {
+			idx = 8 * i + j;
+			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
+			    V_PILADBGRDPTR(idx));
+			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
+			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
+		}
+	}
+	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
+}
+
+/*
+ *	t4_tp_read_la - read TP LA capture buffer
+ *	@adap: the adapter
+ *	@la_buf: where to store the LA data
+ *	@wrptr: the HW write pointer within the capture buffer
+ *
+ *	Reads the contents of the TP LA buffer with the most recent entry at
+ *	the end of the returned data and with the entry at @wrptr first.
+ *	We leave the LA in the running state we find it in.
+ */
+void
+t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
+{
+	bool last_incomplete;
+	unsigned int i, cfg, val, idx;
+
+	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
+	if (cfg & F_DBGLAENABLE)		/* freeze LA */
+		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
+		    adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
+
+	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
+	idx = G_DBGLAWPTR(val);
+	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
+	if (last_incomplete != 0)
+		idx = (idx + 1) & M_DBGLARPTR;
+	if (wrptr != NULL)
+		*wrptr = idx;
+
+	val &= 0xffff;
+	val &= ~V_DBGLARPTR(M_DBGLARPTR);
+	val |= adap->params.tp.la_mask;
+
+	for (i = 0; i < TPLA_SIZE; i++) {
+		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
+		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
+		idx = (idx + 1) & M_DBGLARPTR;
+	}
+
+	/* Wipe out last entry if it isn't valid */
+	if (last_incomplete != 0)
+		la_buf[TPLA_SIZE - 1] = ~0ULL;
+
+	if (cfg & F_DBGLAENABLE)		/* restore running state */
+		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
+		    cfg | adap->params.tp.la_mask);
+}
+
+void
+t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < 8; i++) {
+		u32 *p = la_buf + i;
+
+		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
+		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
+		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
+		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
+			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
+	}
+}
+
+#define	ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+		FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/*
+ *	t4_link_start - apply link configuration to MAC/PHY
+ *	@phy: the PHY to setup
+ *	@mac: the MAC to setup
+ *	@lc: the requested link configuration
+ *
+ *	Set up a port's MAC and PHY according to a desired link configuration.
+ *	- If the PHY can auto-negotiate, first decide what to advertise, then
+ *	  enable/disable auto-negotiation as desired and reset.
+ *	- If the PHY does not auto-negotiate, just reset it.
+ *	- If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
+ *	  otherwise do it later based on the outcome of auto-negotiation.
+ */
+int
+t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+    struct link_config *lc)
+{
+	struct fw_port_cmd c;
+	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+
+	lc->link_ok = 0;
+	if (lc->requested_fc & PAUSE_RX)
+		fc |= FW_PORT_CAP_FC_RX;
+	if (lc->requested_fc & PAUSE_TX)
+		fc |= FW_PORT_CAP_FC_TX;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+	    FW_LEN16(c));
+
+	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else if (lc->autoneg == AUTONEG_DISABLE) {
+		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else
+		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
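+
+/*
+ * Usage note: requesting autonegotiation with symmetric pause on a port
+ * might look like this (sketch; assumes the AUTONEG_ENABLE counterpart
+ * of the AUTONEG_DISABLE setting tested above, with "lc" taken from the
+ * port's persistent link state):
+ *
+ *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
+ *	lc->autoneg = AUTONEG_ENABLE;
+ *	rc = t4_link_start(adap, mbox, port, lc);
+ */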
+
+/*
+ *	t4_restart_aneg - restart autonegotiation
+ *	@adap: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@port: the port id
+ *
+ *	Restarts autonegotiation for the selected port.
+ */
+int
+t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
+{
+	struct fw_port_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+	    FW_LEN16(c));
+	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+struct intr_info {
+	unsigned int mask;	/* bits to check in interrupt status */
+	const char *msg;	/* message to print or NULL */
+	short stat_idx;		/* stat counter to increment or -1 */
+	unsigned short fatal;	/* whether the condition reported is fatal */
+};
+
+/*
+ *	t4_handle_intr_status - table driven interrupt handler
+ *	@adapter: the adapter that generated the interrupt
+ *	@reg: the interrupt status register to process
+ *	@acts: table of interrupt actions
+ *
+ *	A table driven interrupt handler that applies a set of masks to an
+ *	interrupt status word and performs the corresponding actions if the
+ *	interrupts described by the mask have occurred.  The actions include
+ *	optionally emitting a warning or alert message.  The table is terminated
+ *	by an entry specifying mask 0.  Returns the number of fatal interrupt
+ *	conditions.
+ */
+static int
+t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
+    const struct intr_info *acts)
+{
+	int fatal = 0;
+	unsigned int mask = 0;
+	unsigned int status = t4_read_reg(adapter, reg);
+
+	for (/* */; acts->mask; ++acts) {
+		if (!(status & acts->mask))
+			continue;
+		if (acts->fatal != 0) {
+			fatal++;
+			CH_ALERT(adapter, "%s (0x%x)",
+			    acts->msg, status & acts->mask);
+		} else if (acts->msg != NULL)
+			CH_WARN_RATELIMIT(adapter, "%s (0x%x)",
+			    acts->msg, status & acts->mask);
+		mask |= acts->mask;
+	}
+	status &= mask;
+	if (status != 0)		/* clear processed interrupts */
+		t4_write_reg(adapter, reg, status);
+	return (fatal);
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+pcie_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info sysbus_intr_info[] = {
+		{ F_RNPP, "RXNP array parity error", -1, 1 },
+		{ F_RPCP, "RXPC array parity error", -1, 1 },
+		{ F_RCIP, "RXCIF array parity error", -1, 1 },
+		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
+		{ F_RFTP, "RXFT array parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info pcie_port_intr_info[] = {
+		{ F_TPCP, "TXPC array parity error", -1, 1 },
+		{ F_TNPP, "TXNP array parity error", -1, 1 },
+		{ F_TFTP, "TXFT array parity error", -1, 1 },
+		{ F_TCAP, "TXCA array parity error", -1, 1 },
+		{ F_TCIP, "TXCIF array parity error", -1, 1 },
+		{ F_RCAP, "RXCA array parity error", -1, 1 },
+		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
+		{ F_RDPE, "Rx data parity error", -1, 1 },
+		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info pcie_intr_info[] = {
+		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
+		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
+		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
+		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
+		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
+		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
+		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
+		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
+		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+		    0 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter,
+	    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, sysbus_intr_info) +
+	    t4_handle_intr_status(adapter,
+	    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) +
+	    t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
+	if (fat != 0)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void
+tp_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info tp_intr_info[] = {
+		{ 0x3fffffff, "TP parity error", -1, 1 },
+		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info) != 0)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void
+sge_intr_handler(struct adapter *adapter)
+{
+	u64 v;
+	u32 err;
+
+	static struct intr_info sge_intr_info[] = {
+		{ F_ERR_CPL_EXCEED_IQE_SIZE,
+		    "SGE received CPL exceeding IQE size", -1, 1 },
+		{ F_ERR_INVALID_CIDX_INC,
+		    "SGE GTS CIDX increment too large", -1, 0 },
+		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
+		    "SGE IQID > 1023 received CPL for FL", -1, 0 },
+		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+		    0 },
+		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+		    0 },
+		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+		    0 },
+		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+		    0 },
+		{ F_ERR_ING_CTXT_PRIO,
+		    "SGE too many priority ingress contexts", -1, 0 },
+		{ F_ERR_EGR_CTXT_PRIO,
+		    "SGE too many priority egress contexts", -1, 0 },
+		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+		{ 0 }
+	};
+
+	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
+	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
+	if (v != 0) {
+		CH_ALERT(adapter, "SGE parity error (%llx)",
+		    (unsigned long long)v);
+		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
+		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
+	}
+
+	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
+
+	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
+	if (err & F_ERROR_QID_VALID) {
+		CH_ERR(adapter, "SGE error for queue %u", G_ERROR_QID(err));
+		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID);
+	}
+
+	if (v != 0)
+		t4_fatal_err(adapter);
+}
+
+#define	CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
+	F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
+#define	CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
+	F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
+
+/*
+ * CIM interrupt handler.
+ */
+static void
+cim_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info cim_intr_info[] = {
+		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info cim_upintr_info[] = {
+		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
+		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
+		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
+		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
+		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
+		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
+		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
+		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
+		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
+		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
+		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
+		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
+		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
+		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
+	    cim_intr_info) +
+	    t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
+	    cim_upintr_info);
+	if (fat != 0)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void
+ulprx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info ulprx_intr_info[] = {
+		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
+		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
+		{ 0x7fffff, "ULPRX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info)
+	    != 0)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void
+ulptx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info ulptx_intr_info[] = {
+		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+		    0 },
+		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+		    0 },
+		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+		    0 },
+		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+		    0 },
+		{ 0xfffffff, "ULPTX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)
+	    != 0)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void
+pmtx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info pmtx_intr_info[] = {
+		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+		{ 0xffffff0, "PMTX framing error", -1, 1 },
+		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+		    1 },
+		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void
+pmrx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info pmrx_intr_info[] = {
+		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+		{ 0x3ffff0, "PMRX framing error", -1, 1 },
+		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+		    1 },
+		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void
+cplsw_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info cplsw_intr_info[] = {
+		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void
+le_intr_handler(struct adapter *adap)
+{
+	static struct intr_info le_intr_info[] = {
+		{ F_LIPMISS, "LE LIP miss", -1, 0 },
+		{ F_LIP0, "LE 0 LIP error", -1, 0 },
+		{ F_PARITYERR, "LE parity error", -1, 1 },
+		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
+		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
+		t4_fatal_err(adap);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void
+mps_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info mps_rx_intr_info[] = {
+		{ 0xffffff, "MPS Rx parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_tx_intr_info[] = {
+		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
+		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
+		    -1, 1 },
+		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
+		    -1, 1 },
+		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
+		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_trc_intr_info[] = {
+		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
+		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
+		    1 },
+		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_stat_sram_intr_info[] = {
+		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_stat_tx_intr_info[] = {
+		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_stat_rx_intr_info[] = {
+		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_cls_intr_info[] = {
+		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
+	    mps_rx_intr_info) +
+	    t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
+	    mps_tx_intr_info) +
+	    t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
+	    mps_trc_intr_info) +
+	    t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
+	    mps_stat_sram_intr_info) +
+	    t4_handle_intr_status(adapter,
+	    A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+	    mps_stat_tx_intr_info) +
+	    t4_handle_intr_status(adapter,
+	    A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+	    mps_stat_rx_intr_info) +
+	    t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
+	    mps_cls_intr_info);
+
+	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
+	(void) t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
+	if (fat != 0)
+		t4_fatal_err(adapter);
+}
+
+#define	MEM_INT_MASK \
+	(F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void
+mem_intr_handler(struct adapter *adapter, int idx)
+{
+	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+	unsigned int addr, cnt_addr, v;
+
+	if (idx <= MEM_EDC1) {
+		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
+		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
+	} else {
+		addr = A_MC_INT_CAUSE;
+		cnt_addr = A_MC_ECC_STATUS;
+	}
+
+	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
+	if (v & F_PERR_INT_CAUSE)
+		CH_ALERT(adapter, "%s FIFO parity error", name[idx]);
+	if (v & F_ECC_CE_INT_CAUSE) {
+		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
+
+		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
+		CH_WARN_RATELIMIT(adapter,
+		    "%u %s correctable ECC data error%s", cnt, name[idx],
+		    cnt > 1 ? "s" : "");
+	}
+	if (v & F_ECC_UE_INT_CAUSE)
+		CH_ALERT(adapter, "%s uncorrectable ECC data error",
+		    name[idx]);
+
+	t4_write_reg(adapter, addr, v);
+	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void
+ma_intr_handler(struct adapter *adapter)
+{
+	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
+
+	if (status & F_MEM_PERR_INT_CAUSE)
+		CH_ALERT(adapter, "MA parity error, parity status %x",
+		    t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
+	if (status & F_MEM_WRAP_INT_CAUSE) {
+		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
+		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
+		    " address %x", G_MEM_WRAP_CLIENT_NUM(v),
+		    G_MEM_WRAP_ADDRESS(v) << 4);
+	}
+	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
+	t4_fatal_err(adapter);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void
+smb_intr_handler(struct adapter *adap)
+{
+	static struct intr_info smb_intr_info[] = {
+		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info) != 0)
+		t4_fatal_err(adap);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void
+ncsi_intr_handler(struct adapter *adap)
+{
+	static struct intr_info ncsi_intr_info[] = {
+		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info) != 0)
+		t4_fatal_err(adap);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void
+xgmac_intr_handler(struct adapter *adap, int port)
+{
+	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
+
+	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+	if (!v)
+		return;
+
+	if (v & F_TXFIFO_PRTY_ERR)
+		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error", port);
+	if (v & F_RXFIFO_PRTY_ERR)
+		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error", port);
+	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
+	t4_fatal_err(adap);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void
+pl_intr_handler(struct adapter *adap)
+{
+	static struct intr_info pl_intr_info[] = {
+		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
+		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info) != 0)
+		t4_fatal_err(adap);
+}
+
+#define	PF_INTR_MASK (F_PFSW | F_PFCIM)
+#define	GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
+		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+		F_CPL_SWITCH | F_SGE | F_ULP_TX)
+
+/*
+ *	t4_slow_intr_handler - control path interrupt handler
+ *	@adapter: the adapter
+ *
+ *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
+ *	The designation 'slow' is because it involves register reads, while
+ *	data interrupts typically don't involve any MMIOs.
+ */
+int
+t4_slow_intr_handler(struct adapter *adapter)
+{
+	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
+
+	if (!(cause & GLBL_INTR_MASK))
+		return (0);
+	if (cause & F_CIM)
+		cim_intr_handler(adapter);
+	if (cause & F_MPS)
+		mps_intr_handler(adapter);
+	if (cause & F_NCSI)
+		ncsi_intr_handler(adapter);
+	if (cause & F_PL)
+		pl_intr_handler(adapter);
+	if (cause & F_SMB)
+		smb_intr_handler(adapter);
+	if (cause & F_XGMAC0)
+		xgmac_intr_handler(adapter, 0);
+	if (cause & F_XGMAC1)
+		xgmac_intr_handler(adapter, 1);
+	if (cause & F_XGMAC_KR0)
+		xgmac_intr_handler(adapter, 2);
+	if (cause & F_XGMAC_KR1)
+		xgmac_intr_handler(adapter, 3);
+	if (cause & F_PCIE)
+		pcie_intr_handler(adapter);
+	if (cause & F_MC)
+		mem_intr_handler(adapter, MEM_MC);
+	if (cause & F_EDC0)
+		mem_intr_handler(adapter, MEM_EDC0);
+	if (cause & F_EDC1)
+		mem_intr_handler(adapter, MEM_EDC1);
+	if (cause & F_LE)
+		le_intr_handler(adapter);
+	if (cause & F_TP)
+		tp_intr_handler(adapter);
+	if (cause & F_MA)
+		ma_intr_handler(adapter);
+	if (cause & F_PM_TX)
+		pmtx_intr_handler(adapter);
+	if (cause & F_PM_RX)
+		pmrx_intr_handler(adapter);
+	if (cause & F_ULP_RX)
+		ulprx_intr_handler(adapter);
+	if (cause & F_CPL_SWITCH)
+		cplsw_intr_handler(adapter);
+	if (cause & F_SGE)
+		sge_intr_handler(adapter);
+	if (cause & F_ULP_TX)
+		ulptx_intr_handler(adapter);
+
+	/* Clear the interrupts just processed for which we are the master. */
+	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
+	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
+	return (1);
+}
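+
+/*
+ * Usage note: a driver's interrupt routine typically dispatches non-data
+ * events like this (sketch):
+ *
+ *	if (t4_slow_intr_handler(adap) != 0)
+ *		... one or more global conditions were handled and cleared ...
+ *	else
+ *		... no global interrupt was pending for this function ...
+ */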
+
+/*
+ *	t4_intr_enable - enable interrupts
+ *	@adapter: the adapter whose interrupts should be enabled
+ *
+ *	Enable PF-specific interrupts for the calling function and the top-level
+ *	interrupt concentrator for global interrupts.  Interrupts are already
+ *	enabled at each module; here we just enable the roots of the interrupt
+ *	hierarchies.
+ *
+ *	Note: this function should be called only when the driver manages
+ *	non PF-specific interrupts from the various HW modules.  Only one PCI
+ *	function at a time should be doing this.
+ */
+void
+t4_intr_enable(struct adapter *adapter)
+{
+	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+
+	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
+	    F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | F_ERR_DROPPED_DB |
+	    F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0 |
+	    F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
+	    F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_ERR_EGR_CTXT_PRIO |
+	    F_INGRESS_SIZE_ERR | F_EGRESS_SIZE_ERR);
+	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
+	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
+}
+
+/*
+ *	t4_intr_disable - disable interrupts
+ *	@adapter: the adapter whose interrupts should be disabled
+ *
+ *	Disable interrupts.  We only disable the top-level interrupt
+ *	concentrators.  The caller must be a PCI function managing global
+ *	interrupts.
+ */
+void
+t4_intr_disable(struct adapter *adapter)
+{
+	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+
+	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
+	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
+}
+
+/*
+ *	t4_intr_clear - clear all interrupts
+ *	@adapter: the adapter whose interrupts should be cleared
+ *
+ *	Clears all interrupts.  The caller must be a PCI function managing
+ *	global interrupts.
+ */
+void
+t4_intr_clear(struct adapter *adapter)
+{
+	static const unsigned int cause_reg[] = {
+		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
+		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
+		A_MC_INT_CAUSE,
+		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
+		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
+		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
+		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
+		A_TP_INT_CAUSE,
+		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
+		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
+		A_MPS_RX_PERR_INT_CAUSE,
+		A_CPL_INTR_CAUSE,
+		MYPF_REG(A_PL_PF_INT_CAUSE),
+		A_PL_PL_INT_CAUSE,
+		A_LE_DB_INT_CAUSE,
+	};
+
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
+		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
+
+	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
+	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);	/* flush */
+}
+
+/*
+ *	hash_mac_addr - return the hash value of a MAC address
+ *	@addr: the 48-bit Ethernet MAC address
+ *
+ *	Hashes a MAC address according to the hash function used by HW inexact
+ *	(hash) address matching.
+ */
+static int
+hash_mac_addr(const u8 *addr)
+{
+	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+	a ^= b;
+	a ^= (a >> 12);
+	a ^= (a >> 6);
+	return (a & 0x3f);
+}
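+
+/*
+ * Worked example: for the (arbitrary) address 00:07:43:12:34:56 the steps
+ * above give a = 0x000743, b = 0x123456, a ^ b = 0x123315; folding in the
+ * two shifted copies yields 0x127afe, so the address falls in bucket 0x3e
+ * (62) of the 64 inexact-match hash buckets.
+ */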
+
+/*
+ *	t4_config_rss_range - configure a portion of the RSS mapping table
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@viid: virtual interface whose RSS subtable is to be written
+ *	@start: start entry in the table to write
+ *	@n: how many table entries to write
+ *	@rspq: values for the "response queue" (Ingress Queue) lookup table
+ *	@nrspq: number of values in @rspq
+ *
+ *	Programs the selected part of the VI's RSS mapping table with the
+ *	provided values.  If @nrspq < @n the supplied values are used repeatedly
+ *	until the full table range is populated.
+ *
+ *	The caller must ensure the values in @rspq are in the range allowed for
+ *	@viid.
+ */
+int
+t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+    int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+	int ret;
+	const u16 *rsp = rspq;
+	const u16 *rsp_end = rspq + nrspq;
+	struct fw_rss_ind_tbl_cmd cmd;
+
+	(void) memset(&cmd, 0, sizeof (cmd));
+	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+	    V_FW_RSS_IND_TBL_CMD_VIID(viid));
+	cmd.retval_len16 = htonl(FW_LEN16(cmd));
+
+	/*
+	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
+	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
+	 * a 32-bit word as 10-bit values, with the remaining upper two bits
+	 * reserved.
+	 */
+	while (n > 0) {
+		int nq = min(n, 32);
+		__be32 *qp = &cmd.iq0_to_iq2;
+
+		/*
+		 * Set up the firmware RSS command header to send the next
+		 * "nq" Ingress Queue IDs to the firmware.
+		 */
+		cmd.niqid = htons(nq);
+		cmd.startidx = htons(start);
+
+		/*
+		 * Account for the "nq" Ingress Queue IDs we are about to
+		 * write before starting the next loop iteration.
+		 */
+		start += nq;
+		n -= nq;
+
+		/*
+		 * While there are still Ingress Queue IDs to stuff into the
+		 * current firmware RSS command, retrieve them from the
+		 * Ingress Queue ID array and insert them into the command.
+		 */
+		while (nq > 0) {
+			unsigned int v;
+			/*
+			 * Grab up to the next 3 Ingress Queue IDs (wrapping
+			 * around the Ingress Queue ID array if necessary) and
+			 * insert them into the firmware RSS command at the
+			 * current 3-tuple position within the command.
+			 */
+			v = V_FW_RSS_IND_TBL_CMD_IQ0(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+			v |= V_FW_RSS_IND_TBL_CMD_IQ1(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+			v |= V_FW_RSS_IND_TBL_CMD_IQ2(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+
+			*qp++ = htonl(v);
+			nq -= 3;
+		}
+
+		/*
+		 * Send this portion of the RSS table update to the firmware;
+		 * bail out on any errors.
+		 */
+		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof (cmd), NULL);
+		if (ret != 0)
+			return (ret);
+	}
+
+	return (0);
+}
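+
+/*
+ * Usage note: spreading a VI's RSS table over four response queues by
+ * repeating their IDs across the whole range might look like this
+ * (sketch; the queue IDs and table size are illustrative):
+ *
+ *	u16 rss[4] = { iq0, iq1, iq2, iq3 };
+ *	rc = t4_config_rss_range(adap, mbox, viid, 0, tbl_size, rss,
+ *	    ARRAY_SIZE(rss));
+ *
+ * Since nrspq (4) is smaller than n, the four IDs are replayed until the
+ * whole range is populated, as described above.
+ */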
+
+/*
+ *	t4_config_glbl_rss - configure the global RSS mode
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@mode: global RSS mode
+ *	@flags: mode-specific flags
+ *
+ *	Sets the global RSS mode.
+ */
+int
+t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+    unsigned int flags)
+{
+	struct fw_rss_glb_config_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+	c.retval_len16 = htonl(FW_LEN16(c));
+	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+		c.u.basicvirtual.mode_pkd =
+		    htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+	} else
+		return (-EINVAL);
+	return (t4_wr_mbox(adapter, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_config_vi_rss - configure per VI RSS settings
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@viid: the VI id
+ *	@flags: RSS flags
+ *	@defq: id of the default RSS queue for the VI.
+ *
+ *	Configures VI-specific RSS properties.
+ */
+int
+t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+    unsigned int flags, unsigned int defq)
+{
+	struct fw_rss_vi_config_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+	    V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
+	    V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
+	return (t4_wr_mbox(adapter, mbox, &c, sizeof (c), NULL));
+}
+
+/* Read an RSS table row */
+static int
+rd_rss_row(struct adapter *adap, int row, u32 *val)
+{
+	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
+	return (t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
+	    5, 0, val));
+}
+
+/*
+ *	t4_read_rss - read the contents of the RSS mapping table
+ *	@adapter: the adapter
+ *	@map: holds the contents of the RSS mapping table
+ *
+ *	Reads the contents of the RSS hash->queue mapping table.
+ */
+int
+t4_read_rss(struct adapter *adapter, u16 *map)
+{
+	u32 val;
+	int i, ret;
+
+	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+		ret = rd_rss_row(adapter, i, &val);
+		if (ret != 0)
+			return (ret);
+		*map++ = G_LKPTBLQUEUE0(val);
+		*map++ = G_LKPTBLQUEUE1(val);
+	}
+	return (0);
+}
+
+/*
+ *	t4_read_rss_key - read the global RSS key
+ *	@adap: the adapter
+ *	@key: 10-entry array holding the 320-bit RSS key
+ *
+ *	Reads the global 320-bit RSS key.
+ */
+void
+t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
+	    A_TP_RSS_SECRET_KEY0);
+}
+
+/*
+ *	t4_write_rss_key - program one of the RSS keys
+ *	@adap: the adapter
+ *	@key: 10-entry array holding the 320-bit RSS key
+ *	@idx: which RSS key to write
+ *
+ *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
+ *	0..15 the corresponding entry in the RSS key table is written,
+ *	otherwise the global RSS key is written.
+ */
+void
+t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
+{
+	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
+	    A_TP_RSS_SECRET_KEY0);
+	if (idx >= 0 && idx < 16)
+		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+		    V_KEYWRADDR(idx) | F_KEYWREN);
+}
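+
+/*
+ * Usage note: replacing the global 320-bit RSS key (sketch; the XOR is an
+ * illustrative modification):
+ *
+ *	u32 key[10];
+ *	t4_read_rss_key(adap, key);
+ *	key[0] ^= 0xdeadbeef;
+ *	t4_write_rss_key(adap, key, -1);
+ *
+ * Passing -1 (anything outside 0..15) writes the global key rather than
+ * an entry in the RSS key table.
+ */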
+
+/*
+ *	t4_read_rss_pf_config - read PF RSS Configuration Table
+ *	@adapter: the adapter
+ *	@index: the entry in the PF RSS table to read
+ *	@valp: where to store the returned value
+ *
+ *	Reads the PF RSS Configuration Table at the specified index and returns
+ *	the value found there.
+ */
+void
+t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
+{
+	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    valp, 1, A_TP_RSS_PF0_CONFIG + index);
+}
+
+/*
+ *	t4_write_rss_pf_config - write PF RSS Configuration Table
+ *	@adapter: the adapter
+ *	@index: the entry in the VF RSS table to read
+ *	@val: the value to store
+ *
+ *	Writes the PF RSS Configuration Table at the specified index with the
+ *	specified value.
+ */
+void
+t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
+{
+	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    &val, 1, A_TP_RSS_PF0_CONFIG + index);
+}
+
+/*
+ *	t4_read_rss_vf_config - read VF RSS Configuration Table
+ *	@adapter: the adapter
+ *	@index: the entry in the VF RSS table to read
+ *	@vfl: where to store the returned VFL
+ *	@vfh: where to store the returned VFH
+ *
+ *	Reads the VF RSS Configuration Table at the specified index and returns
+ *	the (VFL, VFH) values found there.
+ */
+void
+t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, u32 *vfl,
+    u32 *vfh)
+{
+	u32 vrt;
+
+	/*
+	 * Request that the index'th VF Table values be read into VFL/VFH.
+	 */
+	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
+	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
+	vrt |= V_VFWRADDR(index) | F_VFRDEN;
+	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
+
+	/*
+	 * Grab the VFL/VFH values ...
+	 */
+	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    vfl, 1, A_TP_RSS_VFL_CONFIG);
+	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    vfh, 1, A_TP_RSS_VFH_CONFIG);
+}
+
+/*
+ *	t4_write_rss_vf_config - write VF RSS Configuration Table
+ *
+ *	@adapter: the adapter
+ *	@index: the entry in the VF RSS table to write
+ *	@vfl: the VFL to store
+ *	@vfh: the VFH to store
+ *
+ *	Writes the VF RSS Configuration Table at the specified index with the
+ *	specified (VFL, VFH) values.
+ */
+void
+t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, u32 vfl,
+    u32 vfh)
+{
+	u32 vrt;
+
+	/*
+	 * Load up VFL/VFH with the values to be written ...
+	 */
+	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    &vfl, 1, A_TP_RSS_VFL_CONFIG);
+	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    &vfh, 1, A_TP_RSS_VFH_CONFIG);
+
+	/*
+	 * Write the VFL/VFH into the VF Table at the index'th location.
+	 */
+	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
+	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
+	vrt |= V_VFWRADDR(index) | F_VFWREN;
+	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
+}
+
+/*
+ *	t4_read_rss_pf_map - read PF RSS Map
+ *	@adapter: the adapter
+ *
+ *	Reads the PF RSS Map register and returns its value.
+ */
+u32
+t4_read_rss_pf_map(struct adapter *adapter)
+{
+	u32 pfmap;
+
+	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    &pfmap, 1, A_TP_RSS_PF_MAP);
+	return (pfmap);
+}
+
+/*
+ *	t4_write_rss_pf_map - write PF RSS Map
+ *	@adapter: the adapter
+ *	@pfmap: PF RSS Map value
+ *
+ *	Writes the specified value to the PF RSS Map register.
+ */
+void
+t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
+{
+	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    &pfmap, 1, A_TP_RSS_PF_MAP);
+}
+
+/*
+ *	t4_read_rss_pf_mask - read PF RSS Mask
+ *	@adapter: the adapter
+ *
+ *	Reads the PF RSS Mask register and returns its value.
+ */
+u32
+t4_read_rss_pf_mask(struct adapter *adapter)
+{
+	u32 pfmask;
+
+	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    &pfmask, 1, A_TP_RSS_PF_MSK);
+	return (pfmask);
+}
+
+/*
+ *	t4_write_rss_pf_mask - write PF RSS Mask
+ *	@adapter: the adapter
+ *	@pfmask: PF RSS Mask value
+ *
+ *	Writes the specified value to the PF RSS Mask register.
+ */
+void
+t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
+{
+	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+	    &pfmask, 1, A_TP_RSS_PF_MSK);
+}
+
+/*
+ *	t4_set_filter_mode - configure the optional components of filter tuples
+ *	@adap: the adapter
+ *	@mode_map: a bitmap selecting which optional filter components to enable
+ *
+ *	Sets the filter mode by selecting the optional components to enable
+ *	in filter tuples.  Returns 0 on success and a negative error if the
+ *	requested mode needs more bits than are available for optional
+ *	components.
+ */
+int
+t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
+{
+	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
+
+	int i, nbits = 0;
+
+	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
+		if (mode_map & (1 << i))
+			nbits += width[i];
+	if (nbits > FILTER_OPT_LEN)
+		return (-EINVAL);
+	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
+	    A_TP_VLAN_PRI_MAP);
+	return (0);
+}
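+
+/*
+ * Usage note: selecting port, VLAN, and protocol as the optional filter
+ * components might look like this (sketch; assumes the S_PORT, S_VLAN,
+ * and S_PROTOCOL selectors that lie between S_FCOE and S_FRAGMENTATION).
+ * Per the width[] table above this consumes 3 + 17 + 8 = 28 of the
+ * FILTER_OPT_LEN available bits:
+ *
+ *	rc = t4_set_filter_mode(adap,
+ *	    (1 << S_PORT) | (1 << S_VLAN) | (1 << S_PROTOCOL));
+ */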
+
+/*
+ *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
+ *	@adap: the adapter
+ *	@v4: holds the TCP/IP counter values
+ *	@v6: holds the TCP/IPv6 counter values
+ *
+ *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
+ *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
+ */
+void
+t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+    struct tp_tcp_stats *v6)
+{
+	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
+
+#define	STAT_IDX(x)	((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
+#define	STAT(x)		val[STAT_IDX(x)]
+#define	STAT64(x)	(((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
+
+	if (v4 != NULL) {
+		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
+		    ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
+		v4->tcpOutRsts = STAT(OUT_RST);
+		v4->tcpInSegs  = STAT64(IN_SEG);
+		v4->tcpOutSegs = STAT64(OUT_SEG);
+		v4->tcpRetransSegs = STAT64(RXT_SEG);
+	}
+	if (v6 != NULL) {
+		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
+		    ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
+		v6->tcpOutRsts = STAT(OUT_RST);
+		v6->tcpInSegs  = STAT64(IN_SEG);
+		v6->tcpOutSegs = STAT64(OUT_SEG);
+		v6->tcpRetransSegs = STAT64(RXT_SEG);
+	}
+#undef STAT64
+#undef STAT
+#undef STAT_IDX
+}
+
+/*
+ *	t4_tp_get_err_stats - read TP's error MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's error counters.
+ */
+void
+t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
+	    12, A_TP_MIB_MAC_IN_ERR_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
+	    8, A_TP_MIB_TNL_CNG_DROP_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
+	    4, A_TP_MIB_TNL_DROP_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
+	    4, A_TP_MIB_OFD_VLN_DROP_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
+	    4, A_TP_MIB_TCP_V6IN_ERR_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
+	    2, A_TP_MIB_OFD_ARP_DROP);
+}
+
+/*
+ *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's proxy counters.
+ */
+void
+t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
+{
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
+	    4, A_TP_MIB_TNL_LPBK_0);
+}
+
+/*
+ *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's CPL counters.
+ */
+void
+t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
+	    8, A_TP_MIB_CPL_IN_REQ_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tx_err,
+	    4, A_TP_MIB_CPL_OUT_ERR_0);
+}
+
+/*
+ *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's RDMA counters.
+ */
+void
+t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
+	    2, A_TP_MIB_RQE_DFR_MOD);
+}
+
+/*
+ *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ *	@adap: the adapter
+ *	@idx: the port index
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's FCoE counters for the selected port.
+ */
+void
+t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+    struct tp_fcoe_stats *st)
+{
+	u32 val[2];
+
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
+	    1, A_TP_MIB_FCOE_DDP_0 + idx);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
+	    1, A_TP_MIB_FCOE_DROP_0 + idx);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
+	    2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
+	st->octetsDDP = ((u64)val[0] << 32) | val[1];
+}
+
+/*
+ *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void
+t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+	u32 val[4];
+
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
+	    A_TP_MIB_USM_PKTS);
+	st->frames = val[0];
+	st->drops = val[1];
+	st->octets = ((u64)val[2] << 32) | val[3];
+}
+
+/*
+ *	t4_read_mtu_tbl - returns the values in the HW path MTU table
+ *	@adap: the adapter
+ *	@mtus: where to store the MTU values
+ *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ *	Reads the HW path MTU table.
+ */
+void
+t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+	u32 v;
+	int i;
+
+	for (i = 0; i < NMTUS; ++i) {
+		t4_write_reg(adap, A_TP_MTU_TABLE,
+		    V_MTUINDEX(0xffU) | V_MTUVALUE(i));
+		v = t4_read_reg(adap, A_TP_MTU_TABLE);
+		mtus[i] = G_MTUVALUE(v);
+		if (mtu_log != NULL)
+			mtu_log[i] = G_MTUWIDTH(v);
+	}
+}
+
+/*
+ *	t4_read_cong_tbl - reads the congestion control table
+ *	@adap: the adapter
+ *	@incr: where to store the alpha values
+ *
+ *	Reads the additive increments programmed into the HW congestion
+ *	control table.
+ */
+void
+t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
+{
+	unsigned int mtu, w;
+
+	for (mtu = 0; mtu < NMTUS; ++mtu)
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			t4_write_reg(adap, A_TP_CCTRL_TABLE,
+			    V_ROWINDEX(0xffffU) | (mtu << 5) | w);
+			incr[mtu][w] = (u16)t4_read_reg(adap,
+			    A_TP_CCTRL_TABLE) & 0x1fff;
+		}
+}
+
+/*
+ *	t4_read_pace_tbl - read the pace table
+ *	@adap: the adapter
+ *	@pace_vals: holds the returned values
+ *
+ *	Returns the values of TP's pace table in microseconds.
+ */
+void
+t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
+{
+	unsigned int i, v;
+
+	for (i = 0; i < NTX_SCHED; i++) {
+		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
+		v = t4_read_reg(adap, A_TP_PACE_TABLE);
+		pace_vals[i] = dack_ticks_to_usec(adap, v);
+	}
+}
+
+/*
+ *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ *	@adap: the adapter
+ *	@addr: the indirect TP register address
+ *	@mask: specifies the field within the register to modify
+ *	@val: new value for the field
+ *
+ *	Sets a field of an indirect TP register to the given value.
+ */
+void
+t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+    unsigned int mask, unsigned int val)
+{
+	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
+	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
+	t4_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+/*
+ *	init_cong_ctrl - initialize congestion control parameters
+ *	@a: the alpha values for congestion control
+ *	@b: the beta values for congestion control
+ *
+ *	Initialize the congestion control parameters.
+ */
+static void __devinit
+init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+	a[9] = 2;
+	a[10] = 3;
+	a[11] = 4;
+	a[12] = 5;
+	a[13] = 6;
+	a[14] = 7;
+	a[15] = 8;
+	a[16] = 9;
+	a[17] = 10;
+	a[18] = 14;
+	a[19] = 17;
+	a[20] = 21;
+	a[21] = 25;
+	a[22] = 30;
+	a[23] = 35;
+	a[24] = 45;
+	a[25] = 60;
+	a[26] = 80;
+	a[27] = 100;
+	a[28] = 200;
+	a[29] = 300;
+	a[30] = 400;
+	a[31] = 500;
+
+	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+	b[9] = b[10] = 1;
+	b[11] = b[12] = 2;
+	b[13] = b[14] = b[15] = b[16] = 3;
+	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+	b[28] = b[29] = 6;
+	b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define	CC_MIN_INCR 2U
+
+/*
+ *	t4_load_mtus - write the MTU and congestion control HW tables
+ *	@adap: the adapter
+ *	@mtus: the values for the MTU table
+ *	@alpha: the values for the congestion control alpha parameter
+ *	@beta: the values for the congestion control beta parameter
+ *
+ *	Write the HW MTU table with the supplied MTUs and the high-speed
+ *	congestion control table with the supplied alpha, beta, and MTUs.
+ *	We write the two tables together because the additive increments
+ *	depend on the MTUs.
+ */
+void
+t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+    const unsigned short *alpha, const unsigned short *beta)
+{
+	static const unsigned int avg_pkts[NCCTRL_WIN] = {
+		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+		28672, 40960, 57344, 81920, 114688, 163840, 229376
+	};
+
+	unsigned int i, w;
+
+	for (i = 0; i < NMTUS; ++i) {
+		unsigned int mtu = mtus[i];
+		unsigned int log2 = fls(mtu);
+
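+		/*
+		 * Round the MTU to the nearest power of 2: fls() returns the
+		 * 1-based index of the MSB, and the bit just below the MSB
+		 * decides the direction.  E.g. 1500 (< 1536) rounds down to
+		 * 2^10 while 1536 rounds up to 2^11.
+		 */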
+		if (!(mtu & ((1 << log2) >> 2)))	/* round */
+			log2--;
+		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
+		    V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
+
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			unsigned int inc;
+
+			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+			    CC_MIN_INCR);
+
+			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+			    (w << 16) | (beta[w] << 13) | inc);
+		}
+	}
+}
+
+/*
+ *	t4_set_pace_tbl - set the pace table
+ *	@adap: the adapter
+ *	@pace_vals: the pace values in microseconds
+ *	@start: index of the first entry in the HW pace table to set
+ *	@n: how many entries to set
+ *
+ *	Sets (a subset of the) HW pace table.
+ */
+int
+t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
+    unsigned int start, unsigned int n)
+{
+	unsigned int vals[NTX_SCHED], i;
+	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
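+	/*
+	 * tick_ns: 1000 DACK ticks expressed in microseconds is numerically
+	 * the length of one tick in nanoseconds.
+	 */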
+
+	if (n > NTX_SCHED)
+		return (-ERANGE);
+
+	/* convert values from us to dack ticks, rounding to closest value */
+	for (i = 0; i < n; i++, pace_vals++) {
+		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
+		if (vals[i] > 0x7ff)
+			return (-ERANGE);
+		if (*pace_vals && vals[i] == 0)
+			return (-ERANGE);
+	}
+	for (i = 0; i < n; i++, start++)
+		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
+	return (0);
+}
+
+/*
+ *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
+ *	@adap: the adapter
+ *	@kbps: target rate in Kbps
+ *	@sched: the scheduler index
+ *
+ *	Configure a Tx HW scheduler for the target rate.
+ */
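+/*
+ * The scheduler is programmed as "core clocks per tick" (cpt) and "bytes per
+ * tick" (bpt); the loop below searches for the (cpt, bpt) pair whose
+ * bpt * ticks/second best matches the target rate.  For example, with a
+ * hypothetical 250 MHz core clock and kbps = 10000 (1250000 bytes/s),
+ * cpt = 200 gives tps = 1250000 and bpt = 1, an exact match.
+ */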
+int
+t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
+{
+	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0U;
+	unsigned int clk = adap->params.vpd.cclk * 1000;
+	unsigned int selected_cpt = 0, selected_bpt = 0;
+
+	if (kbps > 0) {
+		kbps *= 125;	/* kbps -> bytes/s */
+		for (cpt = 1; cpt <= 255; cpt++) {
+			tps = clk / cpt;
+			bpt = (kbps + tps / 2) / tps;
+			if (bpt > 0 && bpt <= 255) {
+				v = bpt * tps;
+				delta = v >= kbps ? v - kbps : kbps - v;
+				if (delta < mindelta) {
+					mindelta = delta;
+					selected_cpt = cpt;
+					selected_bpt = bpt;
+				}
+			} else if (selected_cpt != 0)
+				break;
+		}
+		if (!selected_cpt)
+			return (-EINVAL);
+	}
+	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
+	    A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
+	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+	if (sched & 1)
+		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
+	else
+		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
+	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
+	return (0);
+}
+
+/*
+ *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
+ *	@adap: the adapter
+ *	@sched: the scheduler index
+ *	@ipg: the interpacket delay in tenths of nanoseconds
+ *
+ *	Set the interpacket delay for a HW packet rate scheduler.
+ */
+int
+t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
+{
+	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
+
+	/* convert ipg to nearest number of core clocks */
+	ipg *= core_ticks_per_usec(adap);
+	ipg = (ipg + 5000) / 10000;
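+	/*
+	 * e.g. with a hypothetical 250 MHz core clock (250 ticks/us), an ipg
+	 * of 100 (10 ns) becomes (100 * 250 + 5000) / 10000 = 3 core clocks.
+	 */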
+	if (ipg > M_TXTIMERSEPQ0)
+		return (-EINVAL);
+
+	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+	if (sched & 1)
+		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
+	else
+		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
+	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
+	(void) t4_read_reg(adap, A_TP_TM_PIO_DATA);
+	return (0);
+}
+
+/*
+ *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
+ *	@adap: the adapter
+ *	@sched: the scheduler index
+ *	@kbps: the byte rate in Kbps
+ *	@ipg: the interpacket delay in tenths of nanoseconds
+ *
+ *	Return the current configuration of a HW Tx scheduler.
+ */
+void
+t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
+    unsigned int *ipg)
+{
+	unsigned int v, addr, bpt, cpt;
+
+	if (kbps != NULL) {
+		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+		if (sched & 1)
+			v >>= 16;
+		bpt = (v >> 8) & 0xff;
+		cpt = v & 0xff;
+		if (!cpt)
+			*kbps = 0;	/* scheduler disabled */
+		else {
+			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
+			*kbps = (v * bpt) / 125;
+		}
+	}
+	if (ipg != NULL) {
+		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
+		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+		if (sched & 1)
+			v >>= 16;
+		v &= 0xffff;
+		*ipg = (10000 * v) / core_ticks_per_usec(adap);
+	}
+}
+
+/*
+ * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
+ * clocks.  The formula is
+ *
+ * bytes/s = bytes256 * 256 * ClkFreq / 4096
+ *
+ * which is equivalent to
+ *
+ * bytes/s = 62.5 * bytes256 * ClkFreq_ms
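+ *
+ * where ClkFreq_ms is the core clock in cycles per millisecond, i.e. the
+ * kHz value kept in params.vpd.cclk.  For example, bytes256 = 16 at a
+ * hypothetical 250000 kHz core clock gives v = 4000000 below and
+ * v * 62 + v / 2 = 250000000 bytes/s.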
+ */
+static u64
+chan_rate(struct adapter *adap, unsigned int bytes256)
+{
+	u64 v = bytes256 * adap->params.vpd.cclk;
+
+	return (v * 62 + v / 2);
+}
+
+/*
+ *	t4_get_chan_txrate - get the current per channel Tx rates
+ *	@adap: the adapter
+ *	@nic_rate: rates for NIC traffic
+ *	@ofld_rate: rates for offloaded traffic
+ *
+ *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
+ *	for each channel.
+ */
+void
+t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
+{
+	u32 v;
+
+	v = t4_read_reg(adap, A_TP_TX_TRATE);
+	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
+	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
+	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
+	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
+
+	v = t4_read_reg(adap, A_TP_TX_ORATE);
+	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
+	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
+	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
+	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
+}
+
+/*
+ *	t4_set_trace_filter - configure one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the desired trace filter parameters
+ *	@idx: which filter to configure
+ *	@enable: whether to enable or disable the filter
+ *
+ *	Configures one of the tracing filters available in HW.  If @enable is
+ *	%0, @tp is not examined and may be %NULL.
+ */
+int
+t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+    int idx, int enable)
+{
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg, cfg;
+	u32 multitrc = F_TRCMULTIFILTER;
+
+	if (!enable) {
+		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+		goto out;
+	}
+
+	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
+	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE ||
+	    tp->snap_len > 9600 || (idx && tp->snap_len > 256))
+		return (-EINVAL);
+
+	if (tp->snap_len > 256) {	/* must be tracer 0 */
+		if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) |
+		    t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) |
+		    t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) &
+		    F_TFEN)
+			return (-EINVAL);  /* other tracers are enabled */
+		multitrc = 0;
+	} else if (idx != 0) {
+		i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B);
+		if (G_TFCAPTUREMAX(i) > 256 &&
+		    (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN))
+			return (-EINVAL);
+	}
+
+	/* stop the tracer we'll be changing */
+	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+
+	/* disable tracing globally if running in the wrong single/multi mode */
+	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
+	if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) {
+		t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN);
+		(void) t4_read_reg(adap, A_MPS_TRC_CFG);	/* flush */
+		msleep(1);
+		if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY))
+			return (-ETIMEDOUT);
+	}
+	/*
+	 * At this point either the tracing is enabled and in the right mode or
+	 * disabled.
+	 */
+
+	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
+	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
+	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		t4_write_reg(adap, data_reg, tp->data[i]);
+		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+	}
+	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
+	    V_TFCAPTUREMAX(tp->snap_len) | V_TFMINPKTSIZE(tp->min_len));
+	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
+	    V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
+	    V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
+
+	cfg &= ~F_TRCMULTIFILTER;
+	t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc);
+out:	(void) t4_read_reg(adap, A_MPS_TRC_CFG);  /* flush */
+	return (0);
+}
+
+/*
+ *	t4_get_trace_filter - query one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the current trace filter parameters
+ *	@idx: which trace filter to query
+ *	@enabled: non-zero if the filter is enabled
+ *
+ *	Returns the current settings of one of the HW tracing filters.
+ */
+void
+t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+    int *enabled)
+{
+	u32 ctla, ctlb;
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg;
+
+	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
+	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+
+	*enabled = !!(ctla & F_TFEN);
+	tp->snap_len = G_TFCAPTUREMAX(ctlb);
+	tp->min_len = G_TFMINPKTSIZE(ctlb);
+	tp->skip_ofst = G_TFOFFSET(ctla);
+	tp->skip_len = G_TFLENGTH(ctla);
+	tp->invert = !!(ctla & F_TFINVERTMATCH);
+	tp->port = G_TFPORT(ctla);
+
+	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
+	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
+	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+	}
+}
+
+/*
+ *	t4_pmtx_get_stats - returns the HW stats from PMTX
+ *	@adap: the adapter
+ *	@cnt: where to store the count statistics
+ *	@cycles: where to store the cycle statistics
+ *
+ *	Returns performance statistics from PMTX.
+ */
+void
+t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+	int i;
+
+	for (i = 0; i < PM_NSTATS; i++) {
+		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
+		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
+		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
+	}
+}
+
+/*
+ *	t4_pmrx_get_stats - returns the HW stats from PMRX
+ *	@adap: the adapter
+ *	@cnt: where to store the count statistics
+ *	@cycles: where to store the cycle statistics
+ *
+ *	Returns performance statistics from PMRX.
+ */
+void
+t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+	int i;
+
+	for (i = 0; i < PM_NSTATS; i++) {
+		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
+		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
+		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
+	}
+}
+
+/*
+ *	get_mps_bg_map - return the buffer groups associated with a port
+ *	@adap: the adapter
+ *	@idx: the port index
+ *
+ *	Returns a bitmap indicating which MPS buffer groups are associated
+ *	with the given port.  Bit i is set if buffer group i is used by the
+ *	port.
+ */
+static unsigned int
+get_mps_bg_map(struct adapter *adap, int idx)
+{
+	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
+
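+	/*
+	 * NUMPORTS appears to encode the port count (0: one port, 1: two
+	 * ports, otherwise four), which determines how the four buffer
+	 * groups are split among the ports.
+	 */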
+	if (n == 0)
+		return (idx == 0 ? 0xf : 0);
+	if (n == 1)
+		return (idx < 2 ? (3 << (2 * idx)) : 0);
+	return (1 << idx);
+}
+
+/*
+ *	t4_get_port_stats - collect port statistics
+ *	@adap: the adapter
+ *	@idx: the port index
+ *	@p: the stats structure to fill
+ *
+ *	Collect statistics related to the given port from HW.
+ */
+void
+t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+	u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define	GET_STAT(name) \
+	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
+#define	GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+
+	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
+	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
+	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
+	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
+	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
+	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
+	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
+	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
+	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
+	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
+	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
+	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
+	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
+	p->tx_drop		= GET_STAT(TX_PORT_DROP);
+	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
+	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
+	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
+	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
+	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
+	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
+	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
+	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
+	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
+
+	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
+	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
+	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
+	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
+	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
+	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
+	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
+	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
+	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
+	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
+	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
+	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
+	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
+	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
+	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
+	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
+	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
+	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
+	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
+	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
+	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
+	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
+	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
+	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
+	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
+	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
+	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
+
+	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/*
+ *	t4_clr_port_stats - clear port statistics
+ *	@adap: the adapter
+ *	@idx: the port index
+ *
+ *	Clear HW statistics for the given port.
+ */
+void
+t4_clr_port_stats(struct adapter *adap, int idx)
+{
+	unsigned int i;
+	u32 bgmap = get_mps_bg_map(adap, idx);
+
+	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+	    i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+		t4_write_reg(adap, PORT_REG(idx, i), 0);
+	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+	    i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+		t4_write_reg(adap, PORT_REG(idx, i), 0);
+	for (i = 0; i < 4; i++)
+		if (bgmap & (1 << i)) {
+			t4_write_reg(adap,
+			    A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
+			t4_write_reg(adap,
+			    A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
+		}
+}
+
+/*
+ *	t4_get_lb_stats - collect loopback port statistics
+ *	@adap: the adapter
+ *	@idx: the loopback port index
+ *	@p: the stats structure to fill
+ *
+ *	Return HW statistics for the given loopback port.
+ */
+void
+t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
+{
+	u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define	GET_STAT(name) \
+	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
+#define	GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+
+	p->octets		= GET_STAT(BYTES);
+	p->frames		= GET_STAT(FRAMES);
+	p->bcast_frames		= GET_STAT(BCAST);
+	p->mcast_frames		= GET_STAT(MCAST);
+	p->ucast_frames		= GET_STAT(UCAST);
+	p->error_frames		= GET_STAT(ERROR);
+
+	p->frames_64		= GET_STAT(64B);
+	p->frames_65_127	= GET_STAT(65B_127B);
+	p->frames_128_255	= GET_STAT(128B_255B);
+	p->frames_256_511	= GET_STAT(256B_511B);
+	p->frames_512_1023	= GET_STAT(512B_1023B);
+	p->frames_1024_1518	= GET_STAT(1024B_1518B);
+	p->frames_1519_max	= GET_STAT(1519B_MAX);
+	p->drop			= t4_read_reg(adap, PORT_REG(idx,
+	    A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
+
+	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/*
+ *	t4_wol_magic_enable - enable/disable magic packet WoL
+ *	@adap: the adapter
+ *	@port: the physical port index
+ *	@addr: MAC address expected in magic packets, %NULL to disable
+ *
+ *	Enables/disables magic packet wake-on-LAN for the selected port.
+ */
+void
+t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr)
+{
+	if (addr != NULL) {
+		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
+		    (addr[2] << 24) | (addr[3] << 16) |
+		    (addr[4] << 8) | addr[5]);
+		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
+		    (addr[0] << 8) | addr[1]);
+	}
+	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
+	    V_MAGICEN(addr != NULL));
+}
+
+/*
+ *	t4_wol_pat_enable - enable/disable pattern-based WoL
+ *	@adap: the adapter
+ *	@port: the physical port index
+ *	@map: bitmap of which HW pattern filters to set
+ *	@mask0: byte mask for bytes 0-63 of a packet
+ *	@mask1: byte mask for bytes 64-127 of a packet
+ *	@crc: Ethernet CRC for selected bytes
+ *	@enable: enable/disable switch
+ *
+ *	Sets the pattern filters indicated in @map to mask out the bytes
+ *	specified in @mask0/@mask1 in received packets and compare the CRC of
+ *	the resulting packet against @crc.  If @enable is %true pattern-based
+ *	WoL is enabled, otherwise disabled.
+ */
+int
+t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+    u64 mask0, u64 mask1, unsigned int crc, bool enable)
+{
+	int i;
+
+	if (!enable) {
+		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
+		    F_PATEN, 0);
+		return (0);
+	}
+	if (map > 0xff)
+		return (-EINVAL);
+
+#define	EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
+
+	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
+	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
+	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+
+	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
+		if (!(map & 1))
+			continue;
+
+		/* write byte masks */
+		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
+		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
+		(void) t4_read_reg(adap, EPIO_REG(OP));		/* flush */
+		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
+			return (-ETIMEDOUT);
+
+		/* write CRC */
+		t4_write_reg(adap, EPIO_REG(DATA0), crc);
+		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
+		(void) t4_read_reg(adap, EPIO_REG(OP));		/* flush */
+		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
+			return (-ETIMEDOUT);
+	}
+#undef EPIO_REG
+
+	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
+	return (0);
+}
+
+/*
+ *	t4_mk_filtdelwr - create a delete filter WR
+ *	@ftid: the filter ID
+ *	@wr: the filter work request to populate
+ *	@qid: ingress queue to receive the delete notification
+ *
+ *	Creates a filter work request to delete the supplied filter.  If @qid is
+ *	negative the delete notification is suppressed.
+ */
+void
+t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+	(void) memset(wr, 0, sizeof (*wr));
+	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
+	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof (*wr) / 16));
+	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
+	    V_FW_FILTER_WR_NOREPLY(qid < 0));
+	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+	if (qid >= 0)
+		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
+#define	INIT_CMD(var, cmd, rd_wr) do { \
+	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
+	    F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
+	(var).retval_len16 = htonl(FW_LEN16(var)); \
+} while (0)
+
+/*
+ *	t4_mdio_rd - read a PHY register through MDIO
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@phy_addr: the PHY address
+ *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ *	@reg: the register to read
+ *	@valp: where to store the value
+ *
+ *	Issues a FW command through the given mailbox to read a PHY register.
+ */
+int
+t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+    unsigned int mmd, unsigned int reg, unsigned int *valp)
+{
+	int ret;
+	struct fw_ldst_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
+	    V_FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = htons(reg);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
+	if (ret == 0)
+		*valp = ntohs(c.u.mdio.rval);
+	return (ret);
+}
+
+/*
+ *	t4_mdio_wr - write a PHY register through MDIO
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@phy_addr: the PHY address
+ *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ *	@reg: the register to write
+ *	@val: value to write
+ *
+ *	Issues a FW command through the given mailbox to write a PHY register.
+ */
+int
+t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+    unsigned int mmd, unsigned int reg, unsigned int val)
+{
+	struct fw_ldst_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
+	    V_FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = htons(reg);
+	c.u.mdio.rval = htons(val);
+
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_sge_ctxt_rd - read an SGE context through FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@cid: the context id
+ *	@ctype: the context type
+ *	@data: where to store the context data
+ *
+ *	Issues a FW command through the given mailbox to read an SGE context.
+ */
+int
+t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+    enum ctxt_type ctype, u32 *data)
+{
+	int ret;
+	struct fw_ldst_cmd c;
+
+	if (ctype == CTXT_EGRESS)
+		ret = FW_LDST_ADDRSPC_SGE_EGRC;
+	else if (ctype == CTXT_INGRESS)
+		ret = FW_LDST_ADDRSPC_SGE_INGC;
+	else if (ctype == CTXT_FLM)
+		ret = FW_LDST_ADDRSPC_SGE_FLMC;
+	else
+		ret = FW_LDST_ADDRSPC_SGE_CONMC;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.idctxt.physid = htonl(cid);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
+	if (ret == 0) {
+		data[0] = ntohl(c.u.idctxt.ctxt_data0);
+		data[1] = ntohl(c.u.idctxt.ctxt_data1);
+		data[2] = ntohl(c.u.idctxt.ctxt_data2);
+		data[3] = ntohl(c.u.idctxt.ctxt_data3);
+		data[4] = ntohl(c.u.idctxt.ctxt_data4);
+		data[5] = ntohl(c.u.idctxt.ctxt_data5);
+	}
+	return (ret);
+}
+
+/*
+ *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
+ *	@adap: the adapter
+ *	@cid: the context id
+ *	@ctype: the context type
+ *	@data: where to store the context data
+ *
+ *	Reads an SGE context directly, bypassing FW.  This is only for
+ *	debugging when FW is unavailable.
+ */
+int
+t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
+    u32 *data)
+{
+	int i, ret;
+
+	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
+	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
+	if (!ret)
+		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
+			*data++ = t4_read_reg(adap, i);
+	return (ret);
+}
+
+/*
+ *	t4_fw_hello - establish communication with FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@evt_mbox: mailbox to receive async FW events
+ *	@master: specifies the caller's willingness to be the device master
+ *	@state: returns the current device state (if non-NULL)
+ *
+ *	Issues a command to establish communication with FW.  Returns either
+ *	an error (negative integer) or the mailbox of the Master PF.
+ */
+int
+t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+    enum dev_master master, enum dev_state *state)
+{
+	int ret;
+	struct fw_hello_cmd c;
+	u32 v;
+	unsigned int master_mbox;
+	int retries = FW_CMD_HELLO_RETRIES;
+
+retry:
+	(void) memset(&c, 0, sizeof (c));
+	/* LINTED: E_CONSTANT_CONDITION */
+	INIT_CMD(c, HELLO, WRITE);
+	c.err_to_clearinit = htonl(
+	    V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+	    V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+	    V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+	    M_FW_HELLO_CMD_MBMASTER) |
+	    V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
+	    V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
+	    F_FW_HELLO_CMD_CLEARINIT);
+
+	/*
+	 * Issue the HELLO command to the firmware.  If it's not successful
+	 * but indicates that we got a "busy" or "timeout" condition, retry
+	 * the HELLO until we exhaust our retry limit.
+	 */
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
+	if (ret != FW_SUCCESS) {
+		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+			goto retry;
+		return (ret);
+	}
+
+	v = ntohl(c.err_to_clearinit);
+	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
+	if (state != NULL) {
+		if (v & F_FW_HELLO_CMD_ERR)
+			*state = DEV_STATE_ERR;
+		else if (v & F_FW_HELLO_CMD_INIT)
+			*state = DEV_STATE_INIT;
+		else
+			*state = DEV_STATE_UNINIT;
+	}
+
+	/*
+	 * If we're not the Master PF then we need to wait around for the
+	 * Master PF Driver to finish setting up the adapter.
+	 *
+	 * Note that we also do this wait if we're a non-Master-capable PF and
+	 * there is no current Master PF; a Master PF may show up momentarily
+	 * and we wouldn't want to fail pointlessly.  (This can happen when an
+	 * OS loads lots of different drivers rapidly at the same time).  In
+	 * this case, the Master PF returned by the firmware will be
+	 * M_PCIE_FW_MASTER so the test below will work ...
+	 */
+	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
+	    master_mbox != mbox) {
+		int waiting = FW_CMD_HELLO_TIMEOUT;
+
+		/*
+		 * Wait for the firmware to either indicate an error or
+		 * initialized state.  If we see either of these we bail out
+		 * and report the issue to the caller.  If we exhaust the
+		 * "hello timeout" and we haven't exhausted our retries, try
+		 * again.  Otherwise bail with a timeout error.
+		 */
+		for (;;) {
+			u32 pcie_fw;
+
+			msleep(50);
+			waiting -= 50;
+
+			/*
+			 * If neither Error nor Initialized is indicated
+			 * by the firmware, keep waiting until we exhaust our
+			 * timeout ... and then retry if we haven't exhausted
+			 * our retries ...
+			 */
+			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
+				if (waiting <= 0) {
+					if (retries-- > 0)
+						goto retry;
+
+					return (-ETIMEDOUT);
+				}
+				continue;
+			}
+
+			/*
+			 * We have either an Error or an Initialized
+			 * condition; report errors preferentially.
+			 */
+			if (state != NULL) {
+				if (pcie_fw & F_PCIE_FW_ERR)
+					*state = DEV_STATE_ERR;
+				else if (pcie_fw & F_PCIE_FW_INIT)
+					*state = DEV_STATE_INIT;
+			}
+
+			/*
+			 * If we arrived before a Master PF was selected and
+			 * there's now a valid Master PF, grab its identity
+			 * for our caller.
+			 */
+			if (master_mbox == M_PCIE_FW_MASTER &&
+			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
+				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
+			break;
+		}
+	}
+
+	return (master_mbox);
+}
+
+/*
+ *	t4_fw_bye - end communication with FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to terminate communication with FW.
+ */
+int
+t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_bye_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	/* LINTED: E_CONSTANT_CONDITION */
+	INIT_CMD(c, BYE, WRITE);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_early_init - ask FW to initialize the device
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to FW to partially initialize the device.  This
+ *	performs initialization that generally doesn't depend on user input.
+ */
+int
+t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_initialize_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	/* LINTED: E_CONSTANT_CONDITION */
+	INIT_CMD(c, INITIALIZE, WRITE);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_fw_reset - issue a reset to FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@reset: specifies the type of reset to perform
+ *
+ *	Issues a reset command of the specified type to FW.
+ */
+int
+t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+	struct fw_reset_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	/* LINTED: E_CONSTANT_CONDITION */
+	INIT_CMD(c, RESET, WRITE);
+	c.val = htonl(reset);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_fw_config_file - setup an adapter via a Configuration File
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@mtype: the memory type where the Configuration File is located
+ *	@maddr: the memory address where the Configuration File is located
+ *	@finiver: return value for CF [fini] version
+ *	@finicsum: return value for CF [fini] checksum
+ *	@cfcsum: return value for CF computed checksum
+ *
+ *	Issue a command to get the firmware to process the Configuration
+ *	File located at the specified mtype/maddress.  If the Configuration
+ *	File is processed successfully and return value pointers are
+ *	provided, the Configuration File "[fini]" section version and
+ *	checksum values will be returned along with the computed checksum.
+ *	It's up to the caller to decide how it wants to respond to the
+ *	checksums not matching, but it is recommended that a prominent
+ *	warning be emitted to help people rapidly identify changed or
+ *	corrupted Configuration Files.
+ *
+ *	Also note that it's possible to modify things like "niccaps",
+ *	"toecaps",etc. between processing the Configuration File and telling
+ *	the firmware to use the new configuration.  Callers which want to
+ *	do this will need to "hand-roll" their own CAPS_CONFIGS commands for
+ *	Configuration Files if they want to do this.
+ */
+int
+t4_fw_config_file(struct adapter *adap, unsigned int mbox, unsigned int mtype,
+    unsigned int maddr, u32 *finiver, u32 *finicsum, u32 *cfcsum)
+{
+	struct fw_caps_config_cmd caps_cmd;
+	int ret;
+
+	/*
+	 * Tell the firmware to process the indicated Configuration File.
+	 * If there are no errors and the caller has provided return value
+	 * pointers for the [fini] section version, checksum and computed
+	 * checksum, pass those back to the caller.
+	 */
+	(void) memset(&caps_cmd, 0, sizeof (caps_cmd));
+	caps_cmd.op_to_write =
+	    htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+	    F_FW_CMD_REQUEST |
+	    F_FW_CMD_READ);
+	caps_cmd.cfvalid_to_len16 =
+	    htonl(F_FW_CAPS_CONFIG_CMD_CFVALID |
+	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+	    FW_LEN16(caps_cmd));
+	ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof (caps_cmd), &caps_cmd);
+	if (ret < 0)
+		return (ret);
+
+	if (finiver != NULL)
+		*finiver = ntohl(caps_cmd.finiver);
+	if (finicsum != NULL)
+		*finicsum = ntohl(caps_cmd.finicsum);
+	if (cfcsum != NULL)
+		*cfcsum = ntohl(caps_cmd.cfcsum);
+
+	/*
+	 * And now tell the firmware to use the configuration we just loaded.
+	 */
+	caps_cmd.op_to_write =
+	    htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+	return (t4_wr_mbox(adap, mbox, &caps_cmd, sizeof (caps_cmd), NULL));
+}
+
+/*
+ *	t4_fixup_host_params - fix up host-dependent parameters
+ *	@adap: the adapter
+ *	@page_size: the host's Base Page Size
+ *	@cache_line_size: the host's Cache Line Size
+ *
+ *	Various registers in T4 contain values which are dependent on the
+ *	host's Base Page and Cache Line Sizes.  This function updates all of
+ *	those registers with the appropriate values passed in ...
+ */
+int
+t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+    unsigned int cache_line_size)
+{
+	unsigned int page_shift = fls(page_size) - 1;
+	unsigned int sge_hps = page_shift - 10;
+	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+	unsigned int fl_align_log = fls(fl_align) - 1;
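+	/*
+	 * e.g. a 4KB base page gives page_shift = 12 and sge_hps = 2 (the
+	 * HOSTPAGESIZEPF fields below hold log2(page size) - 10), and a
+	 * 64-byte cache line gives stat_len = 64 and fl_align = 64.
+	 */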
+
+	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
+	    V_HOSTPAGESIZEPF0(sge_hps) |
+	    V_HOSTPAGESIZEPF1(sge_hps) |
+	    V_HOSTPAGESIZEPF2(sge_hps) |
+	    V_HOSTPAGESIZEPF3(sge_hps) |
+	    V_HOSTPAGESIZEPF4(sge_hps) |
+	    V_HOSTPAGESIZEPF5(sge_hps) |
+	    V_HOSTPAGESIZEPF6(sge_hps) |
+	    V_HOSTPAGESIZEPF7(sge_hps));
+
+	t4_set_reg_field(adap, A_SGE_CONTROL,
+	    V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+	    F_EGRSTATUSPAGESIZE,
+	    V_INGPADBOUNDARY(fl_align_log - 5) |
+	    V_EGRSTATUSPAGESIZE(stat_len != 64));
+
+	/*
+	 * Adjust various SGE Free List Host Buffer Sizes.
+	 *
+	 * This is something of a crock since we're using fixed indices into
+	 * the array which are also known by the sge.c code and the T4
+	 * Firmware Configuration File.  We need to come up with a much better
+	 * approach to managing this array.  For now, the first four entries
+	 * are:
+	 *
+	 *   0: Host Page Size
+	 *   1: 64KB
+	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+	 *
+	 * For the single-MTU buffers in unpacked mode we need to include
+	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+	 * Padding boundary.  All of these are accommodated in the Factory
+	 * Default Firmware Configuration File but we need to adjust it for
+	 * this host's cache line size.
+	 */
+	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
+	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
+	    (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1) &
+	    ~(fl_align-1));
+	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
+	    (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1) &
+	    ~(fl_align-1));
+
+	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
+
+	return (0);
+}
+
+/*
+ *	t4_fw_initialize - ask FW to initialize the device
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to FW to partially initialize the device.  This
+ *	performs initialization that generally doesn't depend on user input.
+ */
+int
+t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_initialize_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	/* LINTED: E_CONSTANT_CONDITION */
+	INIT_CMD(c, INITIALIZE, WRITE);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_query_params - query FW or device parameters
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF
+ *	@vf: the VF
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@val: the parameter values
+ *
+ *	Reads the value of FW or device parameters.  Up to 7 parameters can be
+ *	queried at once.
+ */
+int
+t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int nparams, const u32 *params, u32 *val)
+{
+	int i, ret;
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return (-EINVAL);
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
+	    V_FW_PARAMS_CMD_VFN(vf));
+	c.retval_len16 = htonl(FW_LEN16(c));
+
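+	/* each command slot is a (mnemonic, value) pair; fill in mnemonics */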
+	for (i = 0; i < nparams; i++, p += 2)
+		*p = htonl(*params++);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
+	if (ret == 0)
+		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+			*val++ = ntohl(*p);
+	return (ret);
+}
+
+/*
+ *	t4_set_params - sets FW or device parameters
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF
+ *	@vf: the VF
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@val: the parameter values
+ *
+ *	Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *	specified at once.
+ */
+int
+t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val)
+{
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return (-EINVAL);
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
+	    V_FW_PARAMS_CMD_VFN(vf));
+	c.retval_len16 = htonl(FW_LEN16(c));
+
+	while (nparams--) {
+		*p++ = htonl(*params++);
+		*p++ = htonl(*val++);
+	}
+
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_cfg_pfvf - configure PF/VF resource limits
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF being configured
+ *	@vf: the VF being configured
+ *	@txq: the max number of egress queues
+ *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
+ *	@rxqi: the max number of interrupt-capable ingress queues
+ *	@rxq: the max number of interruptless ingress queues
+ *	@tc: the PCI traffic class
+ *	@vi: the max number of virtual interfaces
+ *	@cmask: the channel access rights mask for the PF/VF
+ *	@pmask: the port access rights mask for the PF/VF
+ *	@nexact: the maximum number of exact MPS filters
+ *	@rcaps: read capabilities
+ *	@wxcaps: write/execute capabilities
+ *
+ *	Configures resource limits and capabilities for a physical or virtual
+ *	function.
+ */
+int
+t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+    unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi,
+    unsigned int cmask, unsigned int pmask, unsigned int nexact,
+    unsigned int rcaps, unsigned int wxcaps)
+{
+	struct fw_pfvf_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | V_FW_PFVF_CMD_VFN(vf));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
+	    V_FW_PFVF_CMD_NIQ(rxq));
+	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
+	    V_FW_PFVF_CMD_PMASK(pmask) | V_FW_PFVF_CMD_NEQ(txq));
+	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
+	    V_FW_PFVF_CMD_NEXACTF(nexact));
+	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
+	    V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
+	    V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_alloc_vi - allocate a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@port: physical port associated with the VI
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@nmac: number of MAC addresses needed (1 to 5)
+ *	@mac: the MAC addresses of the VI
+ *	@rss_size: size of RSS table slice associated with this VI
+ *
+ *	Allocates a virtual interface for the given physical port.  If @mac is
+ *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ *	@mac should be large enough to hold @nmac Ethernet addresses; they are
+ *	stored consecutively, so the space needed is @nmac * 6 bytes.
+ *	Returns a negative error number or the non-negative VI id.
+ */
+int
+t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+    unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+    unsigned int *rss_size)
+{
+	int ret;
+	struct fw_vi_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+	    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
+	c.nmac = nmac - 1;
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
+	if (ret != 0)
+		return (ret);
+
+	if (mac != NULL) {
+		(void) memcpy(mac, c.mac, sizeof (c.mac));
+		switch (nmac) {
+		case 5:
+			(void) memcpy(mac + 24, c.nmac3, sizeof (c.nmac3));
+		/* FALLTHRU */
+		case 4:
+			(void) memcpy(mac + 18, c.nmac2, sizeof (c.nmac2));
+		/* FALLTHRU */
+		case 3:
+			(void) memcpy(mac + 12, c.nmac1, sizeof (c.nmac1));
+		/* FALLTHRU */
+		case 2:
+			(void) memcpy(mac + 6,  c.nmac0, sizeof (c.nmac0));
+		}
+	}
+	if (rss_size != NULL)
+		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
+	return (G_FW_VI_CMD_VIID(ntohs(c.type_to_viid)));
+}
+
+/*
+ *	t4_free_vi - free a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@viid: virtual interface identifier
+ *
+ *	Free a previously allocated virtual interface.
+ */
+int
+t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int viid)
+{
+	struct fw_vi_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
+	    F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC |
+	    V_FW_VI_CMD_PFN(pf) |
+	    V_FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
+	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
+
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), &c));
+}
+
+/*
+ *	t4_set_rxmode - set Rx properties of a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@mtu: the new MTU or -1
+ *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *
+ *	Sets Rx properties of a virtual interface.
+ */
+int
+t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+    int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok)
+{
+	struct fw_vi_rxmode_cmd c;
+
+	/* convert to FW values */
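+	/*
+	 * A value of -1 becomes the field's all-ones mask, which the FW
+	 * interprets as "leave this setting unchanged".
+	 */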
+	if (mtu < 0)
+		mtu = M_FW_VI_RXMODE_CMD_MTU;
+	if (promisc < 0)
+		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
+	if (all_multi < 0)
+		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
+	if (bcast < 0)
+		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
+	if (vlanex < 0)
+		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
+	    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+	    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+	    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+	    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+	return (t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), NULL, sleep_ok));
+}
+
+/*
+ *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@free: if true any existing filters for this VI id are first removed
+ *	@naddr: the number of MAC addresses to allocate filters for
+ *	@addr: the MAC address(es)
+ *	@idx: where to store the index of each allocated filter
+ *	@hash: pointer to hash address filter bitmap
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Allocates an exact-match filter for each of the supplied addresses and
+ *	sets it to the corresponding address.  If @idx is not %NULL it should
+ *	have at least @naddr entries, each of which will be set to the index of
+ *	the filter allocated for the corresponding MAC address.  If a filter
+ *	could not be allocated for an address, its index is set to 0xffff.
+ *	If @hash is not %NULL, addresses that fail to allocate an exact
+ *	filter are hashed and used to update the hash filter bitmap pointed
+ *	at by @hash.
+ *
+ *	Returns a negative error number or the number of filters allocated.
+ */
+int
+t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid,
+    bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash,
+    bool sleep_ok)
+{
+	int offset, ret = 0;
+	struct fw_vi_mac_cmd c;
+	unsigned int nfilters = 0;
+	unsigned int rem = naddr;
+
+	if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
+		return (-EINVAL);
+
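+	/*
+	 * A single FW_VI_MAC_CMD holds at most ARRAY_SIZE(c.u.exact)
+	 * exact-match entries, so the request is issued in chunks of up to
+	 * that many addresses per mailbox command.
+	 */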
+	for (offset = 0; offset < naddr; /* */) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
+		    ? rem : ARRAY_SIZE(c.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+		    u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;
+
+		(void) memset(&c, 0, sizeof (c));
+		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+		    F_FW_CMD_REQUEST |
+		    F_FW_CMD_WRITE |
+		    V_FW_CMD_EXEC(free) |
+		    V_FW_VI_MAC_CMD_VIID(viid));
+		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
+		    V_FW_CMD_LEN16(len16));
+
+		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+			p->valid_to_idx = htons(
+			    F_FW_VI_MAC_CMD_VALID |
+			    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+			(void) memcpy(p->macaddr, addr[offset+i],
+			    sizeof (p->macaddr));
+		}
+
+		/*
+		 * It's okay if we run out of space in our MAC address arena.
+		 * Some of the addresses we submit may get stored so we need
+		 * to run through the reply to see what the results were ...
+		 */
+		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), &c, sleep_ok);
+		if (ret && ret != -FW_ENOMEM)
+			break;
+
+		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
+
+			if (idx != NULL)
+				idx[offset+i] =
+				    (index >= NUM_MPS_CLS_SRAM_L_INSTANCES ?
+				    0xffff : index);
+			if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
+				nfilters++;
+			else if (hash != NULL)
+				*hash |=
+				    (1ULL << hash_mac_addr(addr[offset+i]));
+		}
+
+		free = false;
+		offset += fw_naddr;
+		rem -= fw_naddr;
+	}
+
+	if (ret == 0 || ret == -FW_ENOMEM)
+		ret = nfilters;
+	return (ret);
+}
+
+/*
+ *	t4_change_mac - modifies the exact-match filter for a MAC address
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@idx: index of existing filter for old value of MAC address, or -1
+ *	@addr: the new MAC address value
+ *	@persist: whether a new MAC allocation should be persistent
+ *	@add_smt: if true also add the address to the HW SMT
+ *
+ *	Modifies an exact-match filter and sets it to the new MAC address if
+ *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
+ *	latter case the address is added persistently if @persist is %true.
+ *
+ *	Note that in general it is not possible to modify the value of a given
+ *	filter so the generic way to modify an address filter is to free the one
+ *	being used by the old address value and allocate a new filter for the
+ *	new address value.
+ *
+ *	Returns a negative error number or the index of the filter with the new
+ *	MAC value.  Note that this index may differ from @idx.
+ */
+int
+t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+    int idx, const u8 *addr, bool persist, bool add_smt)
+{
+	int ret, mode;
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_exact *p = c.u.exact;
+
+	if (idx < 0)				/* new allocation */
+		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
+	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
+	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
+	    V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | V_FW_VI_MAC_CMD_IDX(idx));
+	(void) memcpy(p->macaddr, addr, sizeof (p->macaddr));
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
+	if (ret == 0) {
+		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
+		if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
+			ret = -ENOMEM;
+	}
+	return (ret);
+}
+
+/*
+ *	t4_set_addr_hash - program the MAC inexact-match hash filter
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@ucast: whether the hash filter should also match unicast addresses
+ *	@vec: the value to be written to the hash filter
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int
+t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+    bool ucast, u64 vec, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
+	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
+	    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1));
+	c.u.hash.hashvec = cpu_to_be64(vec);
+	return (t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), NULL, sleep_ok));
+}
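+
+/*
+ * Sketch of building a hash vector (illustrative; macs and naddr are
+ * assumed caller-supplied):
+ *
+ *	u64 vec = 0;
+ *	for (i = 0; i < naddr; i++)
+ *		vec |= 1ULL << hash_mac_addr(macs[i]);
+ *	rc = t4_set_addr_hash(sc, mbox, viid, false, vec, true);
+ */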
+
+/*
+ *	t4_enable_vi - enable/disable a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@rx_en: 1=enable Rx, 0=disable Rx
+ *	@tx_en: 1=enable Tx, 0=disable Tx
+ *
+ *	Enables/disables a virtual interface.
+ */
+int
+t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+    bool rx_en, bool tx_en)
+{
+	struct fw_vi_enable_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
+	    V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_identify_port - identify a VI's port by blinking its LED
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@nblinks: how many times to blink LED at 2.5 Hz
+ *
+ *	Identifies a VI's port by blinking its LED.
+ */
+int
+t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+    unsigned int nblinks)
+{
+	struct fw_vi_enable_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+	c.blinkdur = htons(nblinks);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@start: %true to enable the queues, %false to disable them
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int
+t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+    unsigned int pf, unsigned int vf, unsigned int iqid, unsigned int fl0id,
+    unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+	    V_FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
+	    V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
+	c.iqid = htons(iqid);
+	c.fl0id = htons(fl0id);
+	c.fl1id = htons(fl1id);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
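+
+/*
+ * E.g., to start an ingress queue that has no attached free lists, pass
+ * 0xffff for both FL ids as described above (illustrative call):
+ *
+ *	rc = t4_iq_start_stop(sc, mbox, true, pf, vf, iqid, 0xffff, 0xffff);
+ */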
+
+/*
+ *	t4_iq_free - free an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Frees an ingress queue and its associated FLs, if any.
+ */
+int
+t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id,
+    unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+	    V_FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
+	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
+	c.iqid = htons(iqid);
+	c.fl0id = htons(fl0id);
+	c.fl1id = htons(fl1id);
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_eth_eq_free - free an Ethernet egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees an Ethernet egress queue.
+ */
+int
+t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_eth_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
+	    V_FW_EQ_ETH_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_ctrl_eq_free - free a control egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees a control egress queue.
+ */
+int
+t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ctrl_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
+	    V_FW_EQ_CTRL_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_ofld_eq_free - free an offload egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees an offload egress queue.
+ */
+int
+t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ofld_cmd c;
+
+	(void) memset(&c, 0, sizeof (c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
+	    V_FW_EQ_OFLD_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
+	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
+}
+
+/*
+ *	t4_handle_fw_rpl - process a FW reply message
+ *	@adap: the adapter
+ *	@rpl: start of the FW message
+ *
+ *	Processes a FW message, such as link state change messages.
+ */
+int
+t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+	u8 opcode = *(const u8 *)rpl;
+
+	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
+		int i;
+		const struct fw_port_cmd *p = (const void *)rpl;
+		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
+		struct port_info *pi = NULL;
+		struct link_config *lc;
+		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+		unsigned char link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
+		unsigned char fc = 0;
+		unsigned short speed = 0;
+		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
+
+		if (stat & F_FW_PORT_CMD_RXPAUSE)
+			fc |= PAUSE_RX;
+		if (stat & F_FW_PORT_CMD_TXPAUSE)
+			fc |= PAUSE_TX;
+		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+			speed = SPEED_100;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+			speed = SPEED_1000;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+			speed = SPEED_10000;
+
+		for_each_port(adap, i) {
+			pi = adap2pinfo(adap, i);
+			if (pi->tx_chan == chan)
+				break;
+		}
+		lc = &pi->link_cfg;
+		if (link_ok != lc->link_ok || speed != lc->speed ||
+		    fc != lc->fc) {		/* something changed */
+			lc->link_ok = link_ok;
+			lc->speed = speed;
+			lc->fc = fc;
+			t4_os_link_changed(adap, i, link_ok);
+		}
+		if (mod != pi->mod_type) {
+			/* LINTED: E_ASSIGN_NARROW_CONV */
+			pi->mod_type = mod;
+			t4_os_portmod_changed(adap, i);
+		}
+	}
+	return (0);
+}
+
+/*
+ *	get_pci_mode - determine a card's PCI mode
+ *	@adapter: the adapter
+ *	@p: where to store the PCI settings
+ *
+ *	Determines a card's PCI mode and associated parameters, such as speed
+ *	and width.
+ */
+static void __devinit
+get_pci_mode(struct adapter *adapter, struct pci_params *p)
+{
+	u16 val;
+	u32 pcie_cap;
+
+	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+	if (pcie_cap != 0) {
+		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
+		p->speed = val & PCI_EXP_LNKSTA_CLS;
+		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+	}
+}
+
+/*
+ *	init_link_config - initialize a link's SW state
+ *	@lc: structure holding the link state
+ *	@caps: link capabilities
+ *
+ *	Initializes the SW state maintained for each link, including the link's
+ *	capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit
+init_link_config(struct link_config *lc, unsigned short caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & FW_PORT_CAP_ANEG) {
+		lc->advertising = lc->supported & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+static int __devinit
+wait_dev_ready(struct adapter *adap)
+{
+	u32 whoami;
+
+	whoami = t4_read_reg(adap, A_PL_WHOAMI);
+
+	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+		return (0);
+
+	msleep(500);
+	whoami = t4_read_reg(adap, A_PL_WHOAMI);
+	return ((whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS ?
+	    0 : -EIO));
+}
+
+static int __devinit
+get_flash_params(struct adapter *adapter)
+{
+	int ret;
+	u32 info = 0;
+
+	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
+	if (!ret)
+		ret = sf1_read(adapter, 3, 0, 1, &info);
+	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
+	if (ret < 0)
+		return (ret);
+
+	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
+		return (-EINVAL);
+	info >>= 16;				/* log2 of size */
+	if (info >= 0x14 && info < 0x18)
+		adapter->params.sf_nsec = 1 << (info - 16);
+	else if (info == 0x18)
+		adapter->params.sf_nsec = 64;
+	else
+		return (-EINVAL);
+	adapter->params.sf_size = 1 << info;
+	return (0);
+}
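+
+/*
+ * Worked example: a part whose JEDEC ID has manufacturer byte 0x20 and
+ * capacity byte 0x17 yields sf_size = 1 << 0x17 = 8 MB and
+ * sf_nsec = 1 << (0x17 - 16) = 128 sectors of SF_SEC_SIZE (64 KB) each.
+ */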
+
+/*
+ *	t4_prep_adapter - prepare SW and HW for operation
+ *	@adapter: the adapter
+ *	@reset: if true perform a HW reset
+ *
+ *	Initialize adapter SW state for the various HW modules, set initial
+ *	values for some adapter tunables, take PHYs out of reset, and
+ *	initialize the MDIO interface.
+ */
+int __devinit
+t4_prep_adapter(struct adapter *adapter)
+{
+	int ret;
+
+	ret = wait_dev_ready(adapter);
+	if (ret < 0)
+		return (ret);
+
+	get_pci_mode(adapter, &adapter->params.pci);
+
+	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
+	if (adapter->params.rev == 1) {
+		CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
+		return (-EINVAL);
+	}
+	adapter->params.pci.vpd_cap_addr =
+	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+
+	ret = get_flash_params(adapter);
+	if (ret < 0)
+		return (ret);
+
+	ret = get_vpd_params(adapter, &adapter->params.vpd);
+	if (ret < 0)
+		return (ret);
+
+	if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff)
+		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
+	else
+		adapter->params.cim_la_size = CIMLA_SIZE;
+
+	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+	/*
+	 * Default port for debugging in case we can't reach FW.
+	 */
+	adapter->params.nports = 1;
+	adapter->params.portvec = 1;
+	adapter->params.vpd.cclk = 50000;
+
+	return (0);
+}
+
+int __devinit
+t4_port_init(struct port_info *p, int mbox, int pf, int vf)
+{
+	u8 addr[6];
+	int ret, i, j;
+	struct fw_port_cmd c;
+	unsigned int rss_size;
+	adapter_t *adap = p->adapter;
+
+	(void) memset(&c, 0, sizeof (c));
+
+	for (i = 0, j = -1; i <= p->port_id; i++) {
+		do {
+			j++;
+		} while ((adap->params.portvec & (1 << j)) == 0);
+	}
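+	/*
+	 * j is now the index of the (port_id + 1)-th set bit of portvec,
+	 * i.e. the module port backing this instance.  E.g. with
+	 * portvec = 0xa (bits 1 and 3 set), port_id 1 maps to j = 3.
+	 */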
+
+	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
+	    F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_PORT_CMD_PORTID(j));
+	c.action_to_len16 = htonl(
+	    V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+	    FW_LEN16(c));
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
+	if (ret != 0)
+		return (ret);
+
+	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+	if (ret < 0)
+		return (ret);
+
+	p->viid = (uint16_t)ret;
+	p->tx_chan = (uint8_t)j;
+	p->lport = (uint8_t)j;
+	p->rss_size = (uint16_t)rss_size;
+	t4_os_set_hw_addr(adap, p->port_id, addr);
+
+	ret = ntohl(c.u.info.lstatus_to_modtype);
+	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
+	    G_FW_PORT_CMD_MDIOADDR(ret) : -1;
+	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
+	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
+
+	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+
+	return (0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/cxgbe/common/t4_hw.h	Thu May 23 09:51:05 2013 -0400
@@ -0,0 +1,257 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * This file is part of the Chelsio T4 Ethernet driver.
+ *
+ * Copyright (C) 2009-2013 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#ifndef __CXGBE_T4_HW_H
+#define	__CXGBE_T4_HW_H
+
+#include "osdep.h"
+
+enum {
+	NCHAN		= 4,	/* # of HW channels */
+	MAX_MTU		= 9600,	/* max MAC MTU, excluding header + FCS */
+	EEPROMSIZE	= 17408, /* Serial EEPROM physical size */
+	EEPROMVSIZE	= 32768, /* Serial EEPROM virtual address space size */
+	EEPROMPFSIZE	= 1024,	/* EEPROM writable area size for PFn, n>0 */
+	RSS_NENTRIES	= 2048,	/* # of entries in RSS mapping table */
+	TCB_SIZE	= 128,	/* TCB size */
+	NMTUS		= 16,	/* size of MTU table */
+	NCCTRL_WIN	= 32,	/* # of congestion control windows */
+	NTX_SCHED	= 8,	/* # of HW Tx scheduling queues */
+	PM_NSTATS	= 5,	/* # of PM stats */
+	MBOX_LEN	= 64,	/* mailbox size in bytes */
+	TRACE_LEN	= 112,	/* length of trace data and mask */
+	FILTER_OPT_LEN	= 36,	/* filter tuple width for optional */
+				/* components */
+	NWOL_PAT	= 8,	/* # of WoL patterns */
+	WOL_PAT_LEN	= 128,	/* length of WoL patterns */
+};
+
+enum {
+	CIM_NUM_IBQ	= 6,	/* # of CIM IBQs */
+	CIM_NUM_OBQ	= 6,	/* # of CIM OBQs */
+	CIMLA_SIZE	= 2048,	/* # of 32-bit words in CIM LA */
+	CIM_PIFLA_SIZE	= 64,	/* # of 192-bit words in CIM PIF LA */
+	CIM_MALA_SIZE	= 64,	/* # of 160-bit words in CIM MA LA */
+	CIM_IBQ_SIZE	= 128,	/* # of 128-bit words in a CIM IBQ */
+	TPLA_SIZE	= 128,	/* # of 64-bit words in TP LA */
+	ULPRX_LA_SIZE	= 512,	/* # of 256-bit words in ULP_RX LA */
+};
+
+enum {
+	SF_PAGE_SIZE = 256,		/* serial flash page size */
+	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
+};
+
+/* SGE context types */
+enum ctxt_type { CTXT_EGRESS, CTXT_INGRESS, CTXT_FLM, CTXT_CNM };
+
+enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
+
+enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV };    /* mailbox owners */
+
+enum {
+	SGE_MAX_WR_LEN = 512,	/* max WR size in bytes */
+	SGE_CTXT_SIZE = 24,	/* size of SGE context */
+	SGE_NTIMERS = 6,	/* # of interrupt holdoff timer values */
+	SGE_NCOUNTERS = 4,	/* # of interrupt packet counter values */
+};
+
+struct sge_qstat {		/* data written to SGE queue status entries */
+	volatile __be32 qid;
+	volatile __be16 cidx;
+	volatile __be16 pidx;
+};
+
+#define	S_QSTAT_PIDX    0
+#define	M_QSTAT_PIDX    0xffff
+#define	G_QSTAT_PIDX(x) (((x) >> S_QSTAT_PIDX) & M_QSTAT_PIDX)
+
+#define	S_QSTAT_CIDX    16
+#define	M_QSTAT_CIDX    0xffff
+#define	G_QSTAT_CIDX(x) (((x) >> S_QSTAT_CIDX) & M_QSTAT_CIDX)
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+	__be32 hdrbuflen_pidx;
+	__be32 pldbuflen_qid;
+	union {
+		u8 type_gen;
+		__be64 last_flit;
+	} u;
+};
+
+#define	S_RSPD_NEWBUF    31
+#define	V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF)
+#define	F_RSPD_NEWBUF    V_RSPD_NEWBUF(1U)
+
+#define	S_RSPD_LEN    0
+#define	M_RSPD_LEN    0x7fffffff
+#define	V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define	G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define	S_RSPD_QID    S_RSPD_LEN
+#define	M_RSPD_QID    M_RSPD_LEN
+#define	V_RSPD_QID(x) V_RSPD_LEN(x)
+#define	G_RSPD_QID(x) G_RSPD_LEN(x)
+
+#define	S_RSPD_GEN    7
+#define	V_RSPD_GEN(x) ((x) << S_RSPD_GEN)
+#define	F_RSPD_GEN    V_RSPD_GEN(1U)
+
+#define	S_RSPD_QOVFL    6
+#define	V_RSPD_QOVFL(x) ((x) << S_RSPD_QOVFL)
+#define	F_RSPD_QOVFL    V_RSPD_QOVFL(1U)
+
+#define	S_RSPD_TYPE    4
+#define	M_RSPD_TYPE    0x3
+#define	V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE)
+#define	G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE)
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define	S_QINTR_CNT_EN    0
+#define	V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN)
+#define	F_QINTR_CNT_EN    V_QINTR_CNT_EN(1U)
+
+#define	S_QINTR_TIMER_IDX    1
+#define	M_QINTR_TIMER_IDX    0x7
+#define	V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
+#define	G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+
+/* # of pages a pagepod can hold without needing another pagepod */
+#define	PPOD_PAGES 4U
+
+struct pagepod {
+	__be64 vld_tid_pgsz_tag_color;
+	__be64 len_offset;
+	__be64 rsvd;
+	__be64 addr[PPOD_PAGES + 1];
+};
+
+#define	S_PPOD_COLOR    0
+#define	M_PPOD_COLOR    0x3F
+#define	V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR)
+
+#define	S_PPOD_TAG    6
+#define	M_PPOD_TAG    0xFFFFFF
+#define	V_PPOD_TAG(x) ((x) << S_PPOD_TAG)
+
+#define	S_PPOD_PGSZ    30
+#define	M_PPOD_PGSZ    0x3
+#define	V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ)
+
+#define	S_PPOD_TID    32
+#define	M_PPOD_TID    0xFFFFFF
+#define	V_PPOD_TID(x) ((__u64)(x) << S_PPOD_TID)
+
+#define	S_PPOD_VALID    56
+#define	V_PPOD_VALID(x) ((__u64)(x) << S_PPOD_VALID)
+#define	F_PPOD_VALID    V_PPOD_VALID(1ULL)
+
+#define	S_PPOD_LEN    32
+#define	M_PPOD_LEN    0xFFFFFFFF
+#define	V_PPOD_LEN(x) ((__u64)(x) << S_PPOD_LEN)
+
+#define	S_PPOD_OFST    0
+#define	M_PPOD_OFST    0xFFFFFFFF
+#define	V_PPOD_OFST(x) ((x) << S_PPOD_OFST)
+
+/*
+ * Flash layout.
+ */
+#define	FLASH_START(start)	((start) * SF_SEC_SIZE)
+#define	FLASH_MAX_SIZE(nsecs)	((nsecs) * SF_SEC_SIZE)
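+
+/*
+ * E.g. FLASH_FW_START below works out to 8 * SF_SEC_SIZE = 0x80000 and
+ * FLASH_FW_MAX_SIZE to 8 * 64 KB = 512 KB.
+ */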
+
+enum {
+	/*
+	 * Various Expansion-ROM boot images, etc.
+	 */
+	FLASH_EXP_ROM_START_SEC = 0,
+	FLASH_EXP_ROM_NSECS = 6,
+	FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+	FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+	/*
+	 * iSCSI Boot Firmware Table (iBFT) and other driver-related
+	 * parameters ...
+	 */
+	FLASH_IBFT_START_SEC = 6,
+	FLASH_IBFT_NSECS = 1,
+	FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
+	FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+
+	/*
+	 * Boot configuration data.
+	 */
+	FLASH_BOOTCFG_START_SEC = 7,
+	FLASH_BOOTCFG_NSECS = 1,
+	FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
+	FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+
+	/*
+	 * Location of firmware image in FLASH.
+	 */
+	FLASH_FW_START_SEC = 8,
+	FLASH_FW_NSECS = 8,
+	FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+	FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+	/*
+	 * iSCSI persistent/crash information.
+	 */
+	FLASH_ISCSI_CRASH_START_SEC = 29,
+	FLASH_ISCSI_CRASH_NSECS = 1,
+	FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
+	FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+
+	/*
+	 * FCoE persistent/crash information.
+	 */
+	FLASH_FCOE_CRASH_START_SEC = 30,
+	FLASH_FCOE_CRASH_NSECS = 1,
+	FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
+	FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+
+	/*
+	 * Location of Firmware Configuration File in FLASH.  Since the FPGA
+	 * "FLASH" is smaller we need to store the Configuration File in a
+	 * different location -- which will overlap the end of the firmware
+	 * image if firmware ever gets that large ...
+	 */
+	FLASH_CFG_START_SEC = 31,
+	FLASH_CFG_NSECS = 1,
+	FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+	FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+	FLASH_FPGA_CFG_START_SEC = 15,
+	FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
+
+	/*
+	 * Sectors 32-63 are reserved for FLASH failover.
+	 */
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
+
+#endif /* __CXGBE_T4_HW_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/cxgbe/common/t4_msg.h	Thu May 23 09:51:05 2013 -0400
@@ -0,0 +1,2112 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Definitions of T4 work request and CPL5 commands and status codes.
+ *
+ * Copyright (C) 2008-2013 Chelsio Communications.  All rights reserved.
+ *
+ * Written by Dimitris Michailidis (dm@chelsio.com)
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#ifndef __CXGBE_T4_MSG_H
+#define	__CXGBE_T4_MSG_H
+
+enum {
+	CPL_PASS_OPEN_REQ	= 0x1,
+	CPL_PASS_ACCEPT_RPL	= 0x2,
+	CPL_ACT_OPEN_REQ	= 0x3,
+	CPL_SET_TCB		= 0x4,
+	CPL_SET_TCB_FIELD	= 0x5,
+	CPL_GET_TCB		= 0x6,
+	CPL_PCMD		= 0x7,
+	CPL_CLOSE_CON_REQ	= 0x8,
+	CPL_CLOSE_LISTSRV_REQ	= 0x9,
+	CPL_ABORT_REQ		= 0xA,
+	CPL_ABORT_RPL		= 0xB,
+	CPL_TX_DATA		= 0xC,
+	CPL_RX_DATA_ACK		= 0xD,
+	CPL_TX_PKT		= 0xE,
+	CPL_RTE_DELETE_REQ	= 0xF,
+	CPL_RTE_WRITE_REQ	= 0x10,
+	CPL_RTE_READ_REQ	= 0x11,
+	CPL_L2T_WRITE_REQ	= 0x12,
+	CPL_L2T_READ_REQ	= 0x13,
+	CPL_SMT_WRITE_REQ	= 0x14,
+	CPL_SMT_READ_REQ	= 0x15,
+	CPL_BARRIER		= 0x18,
+	CPL_TID_RELEASE		= 0x1A,
+	CPL_RX_MPS_PKT		= 0x1B,
+
+	CPL_CLOSE_LISTSRV_RPL	= 0x20,
+	CPL_ERROR		= 0x21,
+	CPL_GET_TCB_RPL		= 0x22,
+	CPL_L2T_WRITE_RPL	= 0x23,
+	CPL_PASS_OPEN_RPL	= 0x24,
+	CPL_ACT_OPEN_RPL	= 0x25,
+	CPL_PEER_CLOSE		= 0x26,
+	CPL_RTE_DELETE_RPL	= 0x27,
+	CPL_RTE_WRITE_RPL	= 0x28,
+	CPL_RX_URG_PKT		= 0x29,
+	CPL_ABORT_REQ_RSS	= 0x2B,
+	CPL_RX_URG_NOTIFY	= 0x2C,
+	CPL_ABORT_RPL_RSS	= 0x2D,
+	CPL_SMT_WRITE_RPL	= 0x2E,
+	CPL_TX_DATA_ACK		= 0x2F,
+
+	CPL_RX_PHYS_ADDR	= 0x30,
+	CPL_PCMD_READ_RPL	= 0x31,
+	CPL_CLOSE_CON_RPL	= 0x32,
+	CPL_ISCSI_HDR		= 0x33,
+	CPL_L2T_READ_RPL	= 0x34,
+	CPL_RDMA_CQE		= 0x35,
+	CPL_RDMA_CQE_READ_RSP	= 0x36,
+	CPL_RDMA_CQE_ERR	= 0x37,
+	CPL_RTE_READ_RPL	= 0x38,
+	CPL_RX_DATA		= 0x39,
+	CPL_SET_TCB_RPL		= 0x3A,
+	CPL_RX_PKT		= 0x3B,
+	CPL_PCMD_RPL		= 0x3C,
+	CPL_HIT_NOTIFY		= 0x3D,
+	CPL_PKT_NOTIFY		= 0x3E,
+	CPL_RX_DDP_COMPLETE	= 0x3F,
+
+	CPL_ACT_ESTABLISH	= 0x40,
+	CPL_PASS_ESTABLISH	= 0x41,
+	CPL_RX_DATA_DDP		= 0x42,
+	CPL_SMT_READ_RPL	= 0x43,
+	CPL_PASS_ACCEPT_REQ	= 0x44,
+	CPL_RX2TX_PKT		= 0x45,
+	CPL_RX_FCOE_DDP		= 0x46,
+	CPL_FCOE_HDR		= 0x47,
+
+	CPL_RDMA_READ_REQ	= 0x60,
+
+	CPL_SET_LE_REQ		= 0x80,
+	CPL_PASS_OPEN_REQ6	= 0x81,
+	CPL_ACT_OPEN_REQ6	= 0x83,
+
+	CPL_TX_DMA_ACK		= 0xA0,
+	CPL_RDMA_TERMINATE	= 0xA2,
+	CPL_RDMA_WRITE		= 0xA4,
+	CPL_SGE_EGR_UPDATE	= 0xA5,
+	CPL_SET_LE_RPL		= 0xA6,
+	CPL_FW2_MSG		= 0xA7,
+	CPL_FW2_PLD		= 0xA8,
+
+	CPL_TRACE_PKT		= 0xB0,
+	CPL_RX2TX_DATA		= 0xB1,
+
+	CPL_FW4_MSG		= 0xC0,
+	CPL_FW4_PLD		= 0xC1,
+	CPL_FW4_ACK		= 0xC3,
+
+	CPL_FW6_MSG		= 0xE0,
+	CPL_FW6_PLD		= 0xE1,
+	CPL_TX_PKT_LSO		= 0xED,
+	CPL_TX_PKT_XT		= 0xEE,
+
+	NUM_CPL_CMDS	/* must be last and previous entries must be sorted */
+};
+
+enum CPL_error {
+	CPL_ERR_NONE		   = 0,
+	CPL_ERR_TCAM_PARITY	   = 1,
+	CPL_ERR_TCAM_FULL	   = 3,
+	CPL_ERR_BAD_LENGTH	   = 15,
+	CPL_ERR_BAD_ROUTE	   = 18,
+	CPL_ERR_CONN_RESET	   = 20,
+	CPL_ERR_CONN_EXIST_SYNRECV = 21,
+	CPL_ERR_CONN_EXIST	   = 22,
+	CPL_ERR_ARP_MISS	   = 23,
+	CPL_ERR_BAD_SYN		   = 24,
+	CPL_ERR_CONN_TIMEDOUT	   = 30,
+	CPL_ERR_XMIT_TIMEDOUT	   = 31,
+	CPL_ERR_PERSIST_TIMEDOUT   = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT  = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_RTX_NEG_ADVICE	   = 35,
+	CPL_ERR_PERSIST_NEG_ADVICE = 36,
+	CPL_ERR_KEEPALV_NEG_ADVICE = 37,
+	CPL_ERR_WAIT_ARP_RPL	   = 41,
+	CPL_ERR_ABORT_FAILED	   = 42,
+	CPL_ERR_IWARP_FLM	   = 50,
+};
+
+enum {
+	CPL_CONN_POLICY_AUTO = 0,
+	CPL_CONN_POLICY_ASK  = 1,
+	CPL_CONN_POLICY_FILTER = 2,
+	CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+	ULP_MODE_NONE	= 0,
+	ULP_MODE_ISCSI	= 2,
+	ULP_MODE_RDMA	= 4,
+	ULP_MODE_TCPDDP	= 5,
+	ULP_MODE_FCOE	= 6,
+};
+
+enum {
+	ULP_CRC_HEADER = 1 << 0,
+	ULP_CRC_DATA   = 1 << 1
+};
+
+enum {
+	CPL_PASS_OPEN_ACCEPT,
+	CPL_PASS_OPEN_REJECT,
+	CPL_PASS_OPEN_ACCEPT_TNL
+};
+
+enum {
+	CPL_ABORT_SEND_RST = 0,
+	CPL_ABORT_NO_RST,
+};
+
+enum {				/* TX_PKT_XT checksum types */
+	TX_CSUM_TCP	= 0,
+	TX_CSUM_UDP	= 1,
+	TX_CSUM_CRC16	= 4,
+	TX_CSUM_CRC32	= 5,
+	TX_CSUM_CRC32C	= 6,
+	TX_CSUM_FCOE	= 7,
+	TX_CSUM_TCPIP	= 8,
+	TX_CSUM_UDPIP	= 9,
+	TX_CSUM_TCPIP6	= 10,
+	TX_CSUM_UDPIP6	= 11,
+	TX_CSUM_IP	= 12,
+};
+
+enum {				/* packet type in CPL_RX_PKT */
+	PKTYPE_XACT_UCAST = 0,
+	PKTYPE_HASH_UCAST = 1,
+	PKTYPE_XACT_MCAST = 2,
+	PKTYPE_HASH_MCAST = 3,
+	PKTYPE_PROMISC	  = 4,
+	PKTYPE_HPROMISC	  = 5,
+	PKTYPE_BCAST	  = 6
+};
+
+enum {				/* DMAC type in CPL_RX_PKT */
+	DATYPE_UCAST,
+	DATYPE_MCAST,
+	DATYPE_BCAST
+};
+
+enum {				/* TCP congestion control algorithms */
+	CONG_ALG_RENO,
+	CONG_ALG_TAHOE,
+	CONG_ALG_NEWRENO,
+	CONG_ALG_HIGHSPEED
+};
+
+enum {				/* RSS hash type */
+	RSS_HASH_NONE = 0, /* no hash computed */
+	RSS_HASH_IP   = 1, /* IP or IPv6 2-tuple hash */
+	RSS_HASH_TCP  = 2, /* TCP 4-tuple hash */
+	RSS_HASH_UDP  = 3  /* UDP 4-tuple hash */
+};
+
+enum {				/* LE commands */
+	LE_CMD_READ  = 0x4,
+	LE_CMD_WRITE = 0xb
+};
+
+enum {				/* LE request size */
+	LE_SZ_NONE = 0,
+	LE_SZ_33   = 1,
+	LE_SZ_66   = 2,
+	LE_SZ_132  = 3,
+	LE_SZ_264  = 4,
+	LE_SZ_528  = 5
+};
+
+union opcode_tid {
+	__be32 opcode_tid;
+	__u8 opcode;
+};
+
+#define	S_CPL_OPCODE    24
+#define	V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+#define	G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & 0xFF)
+#define	G_TID(x)    ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define	MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
+#define	OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define	GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
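+
+/*
+ * Example: MK_OPCODE_TID(CPL_ABORT_REQ, 0x123) is
+ * (0xA << 24) | 0x123 = 0x0a000123; after the htonl/ntohl round trip,
+ * GET_TID() recovers 0x123 and G_CPL_OPCODE() recovers 0xA.
+ */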
+
+/* partitioning of TID fields that also carry a queue id */
+#define	S_TID_TID    0
+#define	M_TID_TID    0x3fff
+#define	V_TID_TID(x) ((x) << S_TID_TID)
+#define	G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+
+#define	S_TID_QID    14
+#define	M_TID_QID    0x3ff
+#define	V_TID_QID(x) ((x) << S_TID_QID)
+#define	G_TID_QID(x) (((x) >> S_TID_QID) & M_TID_QID)
+
+union opcode_info {
+	__be64 opcode_info;
+	__u8 opcode;
+};
+
+struct tcp_options {
+	__be16 mss;
+	__u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 :4;
+	__u8 unknown:1;
+	__u8 :1;
+	__u8 sack:1;
+	__u8 tstamp:1;
+#else
+	__u8 tstamp:1;
+	__u8 sack:1;
+	__u8 :1;
+	__u8 unknown:1;
+	__u8 :4;
+#endif
+};
+
+struct rss_header {
+	__u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 channel:2;
+	__u8 filter_hit:1;
+	__u8 filter_tid:1;
+	__u8 hash_type:2;
+	__u8 ipv6:1;
+	__u8 send2fw:1;
+#else
+	__u8 send2fw:1;
+	__u8 ipv6:1;
+	__u8 hash_type:2;
+	__u8 filter_tid:1;
+	__u8 filter_hit:1;
+	__u8 channel:2;
+#endif
+	__be16 qid;
+	__be32 hash_val;
+};
+
+#define	S_HASHTYPE 20
+#define	M_HASHTYPE 0x3
+#define	G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
+#define	S_QNUM 0
+#define	M_QNUM 0xFFFF
+#define	G_QNUM(x) (((x) >> S_QNUM) & M_QNUM)
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+	__be32 wr_hi;
+	__be32 wr_mid;
+	__be64 wr_lo;
+};
+
+/* wr_mid fields */
+#define	S_WR_LEN16    0
+#define	M_WR_LEN16    0xFF
+#define	V_WR_LEN16(x) ((x) << S_WR_LEN16)
+#define	G_WR_LEN16(x) (((x) >> S_WR_LEN16) & M_WR_LEN16)
+
+/* wr_hi fields */
+#define	S_WR_OP    24
+#define	M_WR_OP    0xFF
+#define	V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+#define	G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
+
+#define	WR_HDR struct work_request_hdr wr
+#define	WR_HDR_SIZE sizeof (struct work_request_hdr)
+#define	RSS_HDR
+#else
+#define	WR_HDR
+#define	WR_HDR_SIZE 0
+#define	RSS_HDR struct rss_header rss_hdr;
+#endif
+
+/* option 0 fields */
+#define	S_ACCEPT_MODE    0
+#define	M_ACCEPT_MODE    0x3
+#define	V_ACCEPT_MODE(x) ((x) << S_ACCEPT_MODE)
+#define	G_ACCEPT_MODE(x) (((x) >> S_ACCEPT_MODE) & M_ACCEPT_MODE)
+
+#define	S_TX_CHAN    2
+#define	M_TX_CHAN    0x3
+#define	V_TX_CHAN(x) ((x) << S_TX_CHAN)
+#define	G_TX_CHAN(x) (((x) >> S_TX_CHAN) & M_TX_CHAN)
+
+#define	S_NO_CONG    4
+#define	V_NO_CONG(x) ((x) << S_NO_CONG)
+#define	F_NO_CONG    V_NO_CONG(1U)
+
+#define	S_DELACK    5
+#define	V_DELACK(x) ((x) << S_DELACK)
+#define	F_DELACK    V_DELACK(1U)
+
+#define	S_INJECT_TIMER    6
+#define	V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
+#define	F_INJECT_TIMER    V_INJECT_TIMER(1U)
+
+#define	S_NON_OFFLOAD    7
+#define	V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define	F_NON_OFFLOAD    V_NON_OFFLOAD(1U)
+
+#define	S_ULP_MODE    8
+#define	M_ULP_MODE    0xF
+#define	V_ULP_MODE(x) ((x) << S_ULP_MODE)
+#define	G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
+
+#define	S_RCV_BUFSIZ    12
+#define	M_RCV_BUFSIZ    0x3FFU
+#define	V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
+#define	G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
+
+#define	S_DSCP    22
+#define	M_DSCP    0x3F
+#define	V_DSCP(x) ((x) << S_DSCP)
+#define	G_DSCP(x) (((x) >> S_DSCP) & M_DSCP)
+
+#define	S_SMAC_SEL    28
+#define	M_SMAC_SEL    0xFF
+#define	V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+#define	G_SMAC_SEL(x) (((x) >> S_SMAC_SEL) & M_SMAC_SEL)
+
+#define	S_L2T_IDX    36
+#define	M_L2T_IDX    0xFFF
+#define	V_L2T_IDX(x) ((__u64)(x) << S_L2T_IDX)
+#define	G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
+
+#define	S_TCAM_BYPASS    48
+#define	V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define	F_TCAM_BYPASS    V_TCAM_BYPASS(1ULL)
+
+#define	S_NAGLE    49
+#define	V_NAGLE(x) ((__u64)(x) << S_NAGLE)
+#define	F_NAGLE    V_NAGLE(1ULL)
+
+#define	S_WND_SCALE    50
+#define	M_WND_SCALE    0xF
+#define	V_WND_SCALE(x) ((__u64)(x) << S_WND_SCALE)
+#define	G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
+
+#define	S_KEEP_ALIVE    54
+#define	V_KEEP_ALIVE(x) ((__u64)(x) << S_KEEP_ALIVE)
+#define	F_KEEP_ALIVE    V_KEEP_ALIVE(1ULL)
+
+#define	S_MAX_RT    55
+#define	M_MAX_RT    0xF
+#define	V_MAX_RT(x) ((__u64)(x) << S_MAX_RT)
+#define	G_MAX_RT(x) (((x) >> S_MAX_RT) & M_MAX_RT)
+
+#define	S_MAX_RT_OVERRIDE    59
+#define	V_MAX_RT_OVERRIDE(x) ((__u64)(x) << S_MAX_RT_OVERRIDE)
+#define	F_MAX_RT_OVERRIDE    V_MAX_RT_OVERRIDE(1ULL)
+
+#define	S_MSS_IDX    60
+#define	M_MSS_IDX    0xF
+#define	V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
+#define	G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
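+
+/*
+ * Sketch (illustrative; rcv_bufsiz and ws are caller-supplied values,
+ * not fields defined here) of composing a 64-bit opt0 from the fields
+ * above:
+ *
+ *	__be64 opt0 = cpu_to_be64(V_ULP_MODE(ULP_MODE_NONE) |
+ *	    V_RCV_BUFSIZ(rcv_bufsiz) | V_WND_SCALE(ws) | F_TCAM_BYPASS);
+ */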
+
+/* option 1 fields */
+#define	S_SYN_RSS_ENABLE    0
+#define	V_SYN_RSS_ENABLE(x) ((x) << S_SYN_RSS_ENABLE)
+#define	F_SYN_RSS_ENABLE    V_SYN_RSS_ENABLE(1U)
+
+#define	S_SYN_RSS_USE_HASH    1
+#define	V_SYN_RSS_USE_HASH(x) ((x) << S_SYN_RSS_USE_HASH)
+#define	F_SYN_RSS_USE_HASH    V_SYN_RSS_USE_HASH(1U)
+
+#define	S_SYN_RSS_QUEUE    2
+#define	M_SYN_RSS_QUEUE    0x3FF
+#define	V_SYN_RSS_QUEUE(x) ((x) << S_SYN_RSS_QUEUE)
+#define	G_SYN_RSS_QUEUE(x) (((x) >> S_SYN_RSS_QUEUE) & M_SYN_RSS_QUEUE)
+
+#define	S_LISTEN_INTF    12
+#define	M_LISTEN_INTF    0xFF
+#define	V_LISTEN_INTF(x) ((x) << S_LISTEN_INTF)
+#define	G_LISTEN_INTF(x) (((x) >> S_LISTEN_INTF) & M_LISTEN_INTF)
+
+#define	S_LISTEN_FILTER    20
+#define	V_LISTEN_FILTER(x) ((x) << S_LISTEN_FILTER)
+#define	F_LISTEN_FILTER    V_LISTEN_FILTER(1U)
+
+#define	S_SYN_DEFENSE    21
+#define	V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
+#define	F_SYN_DEFENSE    V_SYN_DEFENSE(1U)
+
+#define	S_CONN_POLICY    22
+#define	M_CONN_POLICY    0x3
+#define	V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
+#define	G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+
+/* option 2 fields */
+#define	S_RSS_QUEUE    0
+#define	M_RSS_QUEUE    0x3FF
+#define	V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+#define	G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+
+#define	S_RSS_QUEUE_VALID    10
+#define	V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define	F_RSS_QUEUE_VALID    V_RSS_QUEUE_VALID(1U)
+
+#define	S_RX_COALESCE_VALID    11
+#define	V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
+#define	F_RX_COALESCE_VALID    V_RX_COALESCE_VALID(1U)
+
+#define	S_RX_COALESCE    12
+#define	M_RX_COALESCE    0x3
+#define	V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
+#define	G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
+
+#define	S_CONG_CNTRL    14
+#define	M_CONG_CNTRL    0x3
+#define	V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define	G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define	S_PACE    16
+#define	M_PACE    0x3
+#define	V_PACE(x) ((x) << S_PACE)
+#define	G_PACE(x) (((x) >> S_PACE) & M_PACE)
+
+#define	S_CONG_CNTRL_VALID    18
+#define	V_CONG_CNTRL_VALID(x) ((x) << S_CONG_CNTRL_VALID)
+#define	F_CONG_CNTRL_VALID    V_CONG_CNTRL_VALID(1U)
+
+#define	S_PACE_VALID    19
+#define	V_PACE_VALID(x) ((x) << S_PACE_VALID)
+#define	F_PACE_VALID    V_PACE_VALID(1U)
+
+#define	S_RX_FC_DISABLE    20
+#define	V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
+#define	F_RX_FC_DISABLE    V_RX_FC_DISABLE(1U)
+
+#define	S_RX_FC_DDP    21
+#define	V_RX_FC_DDP(x) ((x) << S_RX_FC_DDP)
+#define	F_RX_FC_DDP    V_RX_FC_DDP(1U)
+
+#define	S_RX_FC_VALID    22
+#define	V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
+#define	F_RX_FC_VALID    V_RX_FC_VALID(1U)
+
+#define	S_TX_QUEUE    23
+#define	M_TX_QUEUE    0x7
+#define	V_TX_QUEUE(x) ((x) << S_TX_QUEUE)
+#define	G_TX_QUEUE(x) (((x) >> S_TX_QUEUE) & M_TX_QUEUE)
+
+#define	S_RX_CHANNEL    26
+#define	V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define	F_RX_CHANNEL    V_RX_CHANNEL(1U)
+
+#define	S_CCTRL_ECN    27
+#define	V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+#define	F_CCTRL_ECN    V_CCTRL_ECN(1U)
+
+#define	S_WND_SCALE_EN    28
+#define	V_WND_SCALE_EN(x) ((x) << S_WND_SCALE_EN)
+#define	F_WND_SCALE_EN    V_WND_SCALE_EN(1U)
+
+#define	S_TSTAMPS_EN    29
+#define	V_TSTAMPS_EN(x) ((x) << S_TSTAMPS_EN)
+#define	F_TSTAMPS_EN    V_TSTAMPS_EN(1U)
+
+#define	S_SACK_EN    30
+#define	V_SACK_EN(x) ((x) << S_SACK_EN)
+#define	F_SACK_EN    V_SACK_EN(1U)
+
+struct cpl_pass_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be64 opt1;
+};
+
+struct cpl_pass_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be64 opt1;
+};
+
+struct cpl_pass_open_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_pass_establish {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 rsvd;
+	__be32 tos_stid;
+	__be16 mac_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_stid fields */
+#define	S_PASS_OPEN_TID    0
+#define	M_PASS_OPEN_TID    0xFFFFFF
+#define	V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
+#define	G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
+
+#define	S_PASS_OPEN_TOS    24
+#define	M_PASS_OPEN_TOS    0xFF
+#define	V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
+#define	G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
+#define	G_TCPOPT_WSCALE_OK(x)	(((x) >> 5) & 1)
+#define	G_TCPOPT_SACK(x)	(((x) >> 6) & 1)
+#define	G_TCPOPT_TSTAMP(x)	(((x) >> 7) & 1)
+#define	G_TCPOPT_SND_WSCALE(x)	(((x) >> 8) & 0xf)
+#define	G_TCPOPT_MSS(x)		(((x) >> 12) & 0xf)
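+
+/*
+ * E.g., given a struct cpl_pass_establish *p (tcp_opt is big-endian on
+ * the wire):
+ *
+ *	unsigned int snd_ws = G_TCPOPT_SND_WSCALE(ntohs(p->tcp_opt));
+ */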
+
+struct cpl_pass_accept_req {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 hdr_len;
+	__be16 vlan;
+	__be16 l2info;
+	__be32 tos_stid;
+	struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define	S_SYN_RX_CHAN    0
+#define	M_SYN_RX_CHAN    0xF
+#define	V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
+#define	G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
+
+#define	S_TCP_HDR_LEN    10
+#define	M_TCP_HDR_LEN    0x3F
+#define	V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
+#define	G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
+
+#define	S_IP_HDR_LEN    16
+#define	M_IP_HDR_LEN    0x3FF
+#define	V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
+#define	G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
+
+#define	S_ETH_HDR_LEN    26
+#define	M_ETH_HDR_LEN    0x1F
+#define	V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
+#define	G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+
+/* cpl_pass_accept_req.l2info fields */
+#define	S_SYN_MAC_IDX    0
+#define	M_SYN_MAC_IDX    0x1FF
+#define	V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
+#define	G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+
+#define	S_SYN_XACT_MATCH    9
+#define	V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
+#define	F_SYN_XACT_MATCH    V_SYN_XACT_MATCH(1U)
+
+#define	S_SYN_INTF    12
+#define	M_SYN_INTF    0xF
+#define	V_SYN_INTF(x) ((x) << S_SYN_INTF)
+#define	G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+
+struct cpl_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be64 opt0;
+};
+
+struct cpl_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be32 params;
+	__be32 opt2;
+};
+
+struct cpl_act_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be32 params;
+	__be32 opt2;
+};
+
+struct cpl_act_open_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define	S_AOPEN_STATUS    0
+#define	M_AOPEN_STATUS    0xFF
+#define	V_AOPEN_STATUS(x) ((x) << S_AOPEN_STATUS)
+#define	G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define	S_AOPEN_ATID    8
+#define	M_AOPEN_ATID    0xFFFFFF
+#define	V_AOPEN_ATID(x) ((x) << S_AOPEN_ATID)
+#define	G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
+struct cpl_act_establish {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 rsvd;
+	__be32 tos_atid;
+	__be16 mac_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 cookie;
+};
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define	S_QUEUENO    0
+#define	M_QUEUENO    0x3FF
+#define	V_QUEUENO(x) ((x) << S_QUEUENO)
+#define	G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
+
+#define	S_REPLY_CHAN    14
+#define	V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+#define	F_REPLY_CHAN    V_REPLY_CHAN(1U)
+
+#define	S_NO_REPLY    15
+#define	V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define	F_NO_REPLY    V_NO_REPLY(1U)
+
+struct cpl_get_tcb_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 cookie;
+	__u8 status;
+	__be16 len;
+};
+
+struct cpl_set_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 cookie;
+};
+
+struct cpl_set_tcb_field {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 word_cookie;
+	__be64 mask;
+	__be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define	S_WORD    0
+#define	M_WORD    0x1F
+#define	V_WORD(x) ((x) << S_WORD)
+#define	G_WORD(x) (((x) >> S_WORD) & M_WORD)
+
+#define	S_COOKIE    5
+#define	M_COOKIE    0x7
+#define	V_COOKIE(x) ((x) << S_COOKIE)
+#define	G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+struct cpl_set_tcb_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__u8   cookie;
+	__u8   status;
+	__be64 oldval;
+};
+
+struct cpl_close_con_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8  rsvd[3];
+	__u8  status;
+	__be32 snd_nxt;
+	__be32 rcv_nxt;
+};
+
+struct cpl_close_listsvr_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 rsvd;
+};
+
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define	S_LISTSVR_IPV6    14
+#define	V_LISTSVR_IPV6(x) ((x) << S_LISTSVR_IPV6)
+#define	F_LISTSVR_IPV6    V_LISTSVR_IPV6(1U)
+
+struct cpl_close_listsvr_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_abort_req_rss {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8  rsvd[3];
+	__u8  status;
+};
+
+struct cpl_abort_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	__u8  rsvd1;
+	__u8  cmd;
+	__u8  rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8  rsvd[3];
+	__u8  status;
+};
+
+struct cpl_abort_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	__u8  rsvd1;
+	__u8  cmd;
+	__u8  rsvd2[6];
+};
+
+struct cpl_peer_close {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 rcv_nxt;
+};
+
+struct cpl_tid_release {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct tx_data_wr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+	__be32 len;
+	__be32 flags;
+	__be32 sndseq;
+	__be32 param;
+};
+
+/* tx_data_wr.flags fields */
+#define	S_TX_ACK_PAGES    21
+#define	M_TX_ACK_PAGES    0x7
+#define	V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
+#define	G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
+
+/* tx_data_wr.param fields */
+#define	S_TX_PORT    0
+#define	M_TX_PORT    0x7
+#define	V_TX_PORT(x) ((x) << S_TX_PORT)
+#define	G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
+
+#define	S_TX_MSS    4
+#define	M_TX_MSS    0xF
+#define	V_TX_MSS(x) ((x) << S_TX_MSS)
+#define	G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
+
+#define	S_TX_QOS    8
+#define	M_TX_QOS    0xFF
+#define	V_TX_QOS(x) ((x) << S_TX_QOS)
+#define	G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
+
+#define	S_TX_SNDBUF 16
+#define	M_TX_SNDBUF 0xFFFF
+#define	V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
+#define	G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
+
+struct cpl_tx_data {
+	union opcode_tid ot;
+	__be32 len;
+	__be32 rsvd;
+	__be32 flags;
+};
+
+/* cpl_tx_data.flags fields */
+#define	S_TX_PROXY    5
+#define	V_TX_PROXY(x) ((x) << S_TX_PROXY)
+#define	F_TX_PROXY    V_TX_PROXY(1U)
+
+#define	S_TX_ULP_SUBMODE    6
+#define	M_TX_ULP_SUBMODE    0xF
+#define	V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
+#define	G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
+
+#define	S_TX_ULP_MODE    10
+#define	M_TX_ULP_MODE    0xF
+#define	V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
+#define	G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+
+#define	S_TX_SHOVE    14
+#define	V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
+#define	F_TX_SHOVE    V_TX_SHOVE(1U)
+
+#define	S_TX_MORE    15
+#define	V_TX_MORE(x) ((x) << S_TX_MORE)
+#define	F_TX_MORE    V_TX_MORE(1U)
+
+#define	S_TX_URG    16
+#define	V_TX_URG(x) ((x) << S_TX_URG)
+#define	F_TX_URG    V_TX_URG(1U)
+
+#define	S_TX_FLUSH    17
+#define	V_TX_FLUSH(x) ((x) << S_TX_FLUSH)
+#define	F_TX_FLUSH    V_TX_FLUSH(1U)
+
+#define	S_TX_SAVE    18
+#define	V_TX_SAVE(x) ((x) << S_TX_SAVE)
+#define	F_TX_SAVE    V_TX_SAVE(1U)
+
+#define	S_TX_TNL    19
+#define	V_TX_TNL(x) ((x) << S_TX_TNL)
+#define	F_TX_TNL    V_TX_TNL(1U)
+
+/* additional tx_data_wr.flags fields */
+#define	S_TX_CPU_IDX    0
+#define	M_TX_CPU_IDX    0x3F
+#define	V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
+#define	G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
+
+#define	S_TX_CLOSE    17
+#define	V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
+#define	F_TX_CLOSE    V_TX_CLOSE(1U)
+
+#define	S_TX_INIT    18
+#define	V_TX_INIT(x) ((x) << S_TX_INIT)
+#define	F_TX_INIT    V_TX_INIT(1U)
+
+#define	S_TX_IMM_ACK    19
+#define	V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
+#define	F_TX_IMM_ACK    V_TX_IMM_ACK(1U)
+
+#define	S_TX_IMM_DMA    20
+#define	V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
+#define	F_TX_IMM_DMA    V_TX_IMM_DMA(1U)
+
+struct cpl_tx_data_ack {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 snd_una;
+};
+
+struct cpl_wr_ack {  /* TODO */
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 credits;
+	__be16 rsvd;
+	__be32 snd_nxt;
+	__be32 snd_una;
+};
+
+struct cpl_tx_pkt_core {
+	__be32 ctrl0;
+	__be16 pack;
+	__be16 len;
+	__be64 ctrl1;
+};
+
+struct cpl_tx_pkt {
+	WR_HDR;
+	struct cpl_tx_pkt_core c;
+};
+
+#define	cpl_tx_pkt_xt cpl_tx_pkt
+
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define	S_TXPKT_VF    0
+#define	M_TXPKT_VF    0xFF
+#define	V_TXPKT_VF(x) ((x) << S_TXPKT_VF)
+#define	G_TXPKT_VF(x) (((x) >> S_TXPKT_VF) & M_TXPKT_VF)
+
+#define	S_TXPKT_PF    8
+#define	M_TXPKT_PF    0x7
+#define	V_TXPKT_PF(x) ((x) << S_TXPKT_PF)
+#define	G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF)
+
+#define	S_TXPKT_VF_VLD    11
+#define	V_TXPKT_VF_VLD(x) ((x) << S_TXPKT_VF_VLD)
+#define	F_TXPKT_VF_VLD    V_TXPKT_VF_VLD(1U)
+
+#define	S_TXPKT_OVLAN_IDX    12
+#define	M_TXPKT_OVLAN_IDX    0xF
+#define	V_TXPKT_OVLAN_IDX(x) ((x) << S_TXPKT_OVLAN_IDX)
+#define	G_TXPKT_OVLAN_IDX(x) (((x) >> S_TXPKT_OVLAN_IDX) & M_TXPKT_OVLAN_IDX)
+
+#define	S_TXPKT_INTF    16
+#define	M_TXPKT_INTF    0xF
+#define	V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define	G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define	S_TXPKT_SPECIAL_STAT    20
+#define	V_TXPKT_SPECIAL_STAT(x) ((x) << S_TXPKT_SPECIAL_STAT)
+#define	F_TXPKT_SPECIAL_STAT    V_TXPKT_SPECIAL_STAT(1U)
+
+#define	S_TXPKT_INS_OVLAN    21
+#define	V_TXPKT_INS_OVLAN(x) ((x) << S_TXPKT_INS_OVLAN)
+#define	F_TXPKT_INS_OVLAN    V_TXPKT_INS_OVLAN(1U)
+
+#define	S_TXPKT_STAT_DIS    22
+#define	V_TXPKT_STAT_DIS(x) ((x) << S_TXPKT_STAT_DIS)
+#define	F_TXPKT_STAT_DIS    V_TXPKT_STAT_DIS(1U)
+
+#define	S_TXPKT_LOOPBACK    23
+#define	V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
+#define	F_TXPKT_LOOPBACK    V_TXPKT_LOOPBACK(1U)
+
+#define	S_TXPKT_OPCODE    24
+#define	M_TXPKT_OPCODE    0xFF
+#define	V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define	G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define	S_TXPKT_SA_IDX    0
+#define	M_TXPKT_SA_IDX    0xFFF
+#define	V_TXPKT_SA_IDX(x) ((x) << S_TXPKT_SA_IDX)
+#define	G_TXPKT_SA_IDX(x) (((x) >> S_TXPKT_SA_IDX) & M_TXPKT_SA_IDX)
+
+#define	S_TXPKT_CSUM_END    12
+#define	M_TXPKT_CSUM_END    0xFF
+#define	V_TXPKT_CSUM_END(x) ((x) << S_TXPKT_CSUM_END)
+#define	G_TXPKT_CSUM_END(x) (((x) >> S_TXPKT_CSUM_END) & M_TXPKT_CSUM_END)
+
+#define	S_TXPKT_CSUM_START    20
+#define	M_TXPKT_CSUM_START    0x3FF
+#define	V_TXPKT_CSUM_START(x) ((x) << S_TXPKT_CSUM_START)
+#define	G_TXPKT_CSUM_START(x) (((x) >> S_TXPKT_CSUM_START) & M_TXPKT_CSUM_START)
+
+#define	S_TXPKT_IPHDR_LEN    20
+#define	M_TXPKT_IPHDR_LEN    0x3FFF
+#define	V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN)
+#define	G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN)
+
+#define	S_TXPKT_CSUM_LOC    30
+#define	M_TXPKT_CSUM_LOC    0x3FF
+#define	V_TXPKT_CSUM_LOC(x) ((__u64)(x) << S_TXPKT_CSUM_LOC)
+#define	G_TXPKT_CSUM_LOC(x) (((x) >> S_TXPKT_CSUM_LOC) & M_TXPKT_CSUM_LOC)
+
+#define	S_TXPKT_ETHHDR_LEN    34
+#define	M_TXPKT_ETHHDR_LEN    0x3F
+#define	V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN)
+#define	G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN)
+
+#define	S_TXPKT_CSUM_TYPE    40
+#define	M_TXPKT_CSUM_TYPE    0xF
+#define	V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE)
+#define	G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE)
+
+#define	S_TXPKT_VLAN    44
+#define	M_TXPKT_VLAN    0xFFFF
+#define	V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN)
+#define	G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define	S_TXPKT_VLAN_VLD    60
+#define	V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD)
+#define	F_TXPKT_VLAN_VLD    V_TXPKT_VLAN_VLD(1ULL)
+
+#define	S_TXPKT_IPSEC    61
+#define	V_TXPKT_IPSEC(x) ((__u64)(x) << S_TXPKT_IPSEC)
+#define	F_TXPKT_IPSEC    V_TXPKT_IPSEC(1ULL)
+
+#define	S_TXPKT_IPCSUM_DIS    62
+#define	V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS)
+#define	F_TXPKT_IPCSUM_DIS    V_TXPKT_IPCSUM_DIS(1ULL)
+
+#define	S_TXPKT_L4CSUM_DIS    63
+#define	V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
+#define	F_TXPKT_L4CSUM_DIS    V_TXPKT_L4CSUM_DIS(1ULL)
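+
+/*
+ * Sketch (illustrative; eh_len and ip_len are caller-computed header
+ * lengths) of requesting TCP/IPv4 checksum insertion on transmit:
+ *
+ *	__be64 ctrl1 = cpu_to_be64(V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ *	    V_TXPKT_ETHHDR_LEN(eh_len) | V_TXPKT_IPHDR_LEN(ip_len));
+ */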
+
+struct cpl_tx_pkt_lso {
+	__be32 lso_ctrl;
+	__be16 ipid_ofst;
+	__be16 mss;
+	__be32 seqno_offset;
+	__be32 len;
+	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+/* cpl_tx_pkt_lso.lso_ctrl fields */
+#define	S_LSO_TCPHDR_LEN    0
+#define	M_LSO_TCPHDR_LEN    0xF
+#define	V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN)
+#define	G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN)
+
+#define	S_LSO_IPHDR_LEN    4
+#define	M_LSO_IPHDR_LEN    0xFFF
+#define	V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN)
+#define	G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN)
+
+#define	S_LSO_ETHHDR_LEN    16
+#define	M_LSO_ETHHDR_LEN    0xF
+#define	V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN)
+#define	G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN)
+
+#define	S_LSO_IPV6    20
+#define	V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define	F_LSO_IPV6    V_LSO_IPV6(1U)
+
+#define	S_LSO_OFLD_ENCAP    21
+#define	V_LSO_OFLD_ENCAP(x) ((x) << S_LSO_OFLD_ENCAP)
+#define	F_LSO_OFLD_ENCAP    V_LSO_OFLD_ENCAP(1U)
+
+#define	S_LSO_LAST_SLICE    22
+#define	V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE)
+#define	F_LSO_LAST_SLICE    V_LSO_LAST_SLICE(1U)
+
+#define	S_LSO_FIRST_SLICE    23
+#define	V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE)
+#define	F_LSO_FIRST_SLICE    V_LSO_FIRST_SLICE(1U)
+
+#define	S_LSO_OPCODE    24
+#define	M_LSO_OPCODE    0xFF
+#define	V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE)
+#define	G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE)
+
+/* cpl_tx_pkt_lso.mss fields */
+#define	S_LSO_MSS    0
+#define	M_LSO_MSS    0x3FFF
+#define	V_LSO_MSS(x) ((x) << S_LSO_MSS)
+#define	G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
+
+#define	S_LSO_IPID_SPLIT    15
+#define	V_LSO_IPID_SPLIT(x) ((x) << S_LSO_IPID_SPLIT)
+#define	F_LSO_IPID_SPLIT    V_LSO_IPID_SPLIT(1U)
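+
+/*
+ * Sketch (illustrative; lso points at a struct cpl_tx_pkt_lso and the
+ * header lengths are caller-computed) of a single-slice TCP/IPv4 LSO
+ * control word:
+ *
+ *	lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ *	    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ *	    V_LSO_ETHHDR_LEN(eh_len) | V_LSO_IPHDR_LEN(ip_len) |
+ *	    V_LSO_TCPHDR_LEN(tcp_len));
+ *	lso->mss = htons(mss);
+ */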
+
+struct cpl_tx_pkt_coalesce {
+	__be32 cntrl;
+	__be32 len;
+	__be64 addr;
+};
+
+struct tx_pkt_coalesce_wr {
+	WR_HDR;
+#if !(defined C99_NOT_SUPPORTED)
+	struct cpl_tx_pkt_coalesce cpl[];
+#endif
+};
+
+struct mngt_pktsched_wr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+	__u8  mngt_opcode;
+	__u8  rsvd[7];
+	__u8  sched;
+	__u8  idx;
+	__u8  min;
+	__u8  max;
+	__u8  binding;
+	__u8  rsvd1[3];
+};
+
+struct cpl_iscsi_hdr_no_rss {
+	union opcode_tid ot;
+	__be16 pdu_len_ddp;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	__u8 rsvd;
+	__u8 status;
+};
+
+struct cpl_iscsi_hdr {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 pdu_len_ddp;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	__u8 rsvd;
+	__u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define	S_ISCSI_PDU_LEN    0
+#define	M_ISCSI_PDU_LEN    0x7FFF
+#define	V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
+#define	G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
+
+#define	S_ISCSI_DDP    15
+#define	V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
+#define	F_ISCSI_DDP    V_ISCSI_DDP(1U)
+
+struct cpl_rx_data {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 dack_mode:2;
+	__u8 psh:1;
+	__u8 heartbeat:1;
+	__u8 ddp_off:1;
+	__u8 :3;
+#else
+	__u8 :3;
+	__u8 ddp_off:1;
+	__u8 heartbeat:1;
+	__u8 psh:1;
+	__u8 dack_mode:2;
+#endif
+	__u8 status;
+};
+
+struct cpl_fcoe_hdr {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 oxid;
+	__be16 len;
+	__be32 rctl_fctl;
+	__u8 cs_ctl;
+	__u8 df_ctl;
+	__u8 sof;
+	__u8 eof;
+	__be16 seq_cnt;
+	__u8 seq_id;
+	__u8 type;
+	__be32 param;
+};
+
+struct cpl_rx_urg_notify {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 seq;
+};
+
+struct cpl_rx_urg_pkt {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+};
+
+struct cpl_rx_data_ack {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.credit_dack fields */
+#define	S_RX_CREDITS    0
+#define	M_RX_CREDITS    0x3FFFFFF
+#define	V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
+#define	G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
+
+#define	S_RX_MODULATE_TX    26
+#define	V_RX_MODULATE_TX(x) ((x) << S_RX_MODULATE_TX)
+#define	F_RX_MODULATE_TX    V_RX_MODULATE_TX(1U)
+
+#define	S_RX_MODULATE_RX    27
+#define	V_RX_MODULATE_RX(x) ((x) << S_RX_MODULATE_RX)
+#define	F_RX_MODULATE_RX    V_RX_MODULATE_RX(1U)
+
+#define	S_RX_FORCE_ACK    28
+#define	V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
+#define	F_RX_FORCE_ACK    V_RX_FORCE_ACK(1U)
+
+#define	S_RX_DACK_MODE    29
+#define	M_RX_DACK_MODE    0x3
+#define	V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define	G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define	S_RX_DACK_CHANGE    31
+#define	V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define	F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
+
+struct cpl_rx_ddp_complete {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 ddp_report;
+	__be32 rcv_nxt;
+	__be32 rsvd;
+};
+
+struct cpl_rx_data_ddp {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 urg;
+	__be16 len;
+	__be32 seq;
+	union {
+		__be32 nxt_seq;
+		__be32 ddp_report;
+	} u;
+	__be32 ulp_crc;
+	__be32 ddpvld;
+};
+
+struct cpl_rx_fcoe_ddp {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 seq;
+	__be32 ddp_report;
+	__be32 ulp_crc;
+	__be32 ddpvld;
+};
+
+/* cpl_rx_{data,fcoe}_ddp.ddpvld fields */
+#define	S_DDP_VALID    15
+#define	M_DDP_VALID    0x1FFFF
+#define	V_DDP_VALID(x) ((x) << S_DDP_VALID)
+#define	G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
+
+#define	S_DDP_PPOD_MISMATCH    15
+#define	V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
+#define	F_DDP_PPOD_MISMATCH    V_DDP_PPOD_MISMATCH(1U)
+
+#define	S_DDP_PDU    16
+#define	V_DDP_PDU(x) ((x) << S_DDP_PDU)
+#define	F_DDP_PDU    V_DDP_PDU(1U)
+
+#define	S_DDP_LLIMIT_ERR    17
+#define	V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
+#define	F_DDP_LLIMIT_ERR    V_DDP_LLIMIT_ERR(1U)
+
+#define	S_DDP_PPOD_PARITY_ERR    18
+#define	V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
+#define	F_DDP_PPOD_PARITY_ERR    V_DDP_PPOD_PARITY_ERR(1U)
+
+#define	S_DDP_PADDING_ERR    19
+#define	V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
+#define	F_DDP_PADDING_ERR    V_DDP_PADDING_ERR(1U)
+
+#define	S_DDP_HDRCRC_ERR    20
+#define	V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
+#define	F_DDP_HDRCRC_ERR    V_DDP_HDRCRC_ERR(1U)
+
+#define	S_DDP_DATACRC_ERR    21
+#define	V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
+#define	F_DDP_DATACRC_ERR    V_DDP_DATACRC_ERR(1U)
+
+#define	S_DDP_INVALID_TAG    22
+#define	V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
+#define	F_DDP_INVALID_TAG    V_DDP_INVALID_TAG(1U)
+
+#define	S_DDP_ULIMIT_ERR    23
+#define	V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
+#define	F_DDP_ULIMIT_ERR    V_DDP_ULIMIT_ERR(1U)
+
+#define	S_DDP_OFFSET_ERR    24
+#define	V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
+#define	F_DDP_OFFSET_ERR    V_DDP_OFFSET_ERR(1U)
+
+#define	S_DDP_COLOR_ERR    25
+#define	V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
+#define	F_DDP_COLOR_ERR    V_DDP_COLOR_ERR(1U)
+
+#define	S_DDP_TID_MISMATCH    26
+#define	V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
+#define	F_DDP_TID_MISMATCH    V_DDP_TID_MISMATCH(1U)
+
+#define	S_DDP_INVALID_PPOD    27
+#define	V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
+#define	F_DDP_INVALID_PPOD    V_DDP_INVALID_PPOD(1U)
+
+#define	S_DDP_ULP_MODE    28
+#define	M_DDP_ULP_MODE    0xF
+#define	V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
+#define	G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
+
+/* cpl_rx_{data,fcoe}_ddp.ddp_report fields */
+#define	S_DDP_OFFSET    0
+#define	M_DDP_OFFSET    0xFFFFFF
+#define	V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
+#define	G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+
+#define	S_DDP_DACK_MODE    24
+#define	M_DDP_DACK_MODE    0x3
+#define	V_DDP_DACK_MODE(x) ((x) << S_DDP_DACK_MODE)
+#define	G_DDP_DACK_MODE(x) (((x) >> S_DDP_DACK_MODE) & M_DDP_DACK_MODE)
+
+#define	S_DDP_BUF_IDX    26
+#define	V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
+#define	F_DDP_BUF_IDX    V_DDP_BUF_IDX(1U)
+
+#define	S_DDP_URG    27
+#define	V_DDP_URG(x) ((x) << S_DDP_URG)
+#define	F_DDP_URG    V_DDP_URG(1U)
+
+#define	S_DDP_PSH    28
+#define	V_DDP_PSH(x) ((x) << S_DDP_PSH)
+#define	F_DDP_PSH    V_DDP_PSH(1U)
+
+#define	S_DDP_BUF_COMPLETE    29
+#define	V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
+#define	F_DDP_BUF_COMPLETE    V_DDP_BUF_COMPLETE(1U)
+
+#define	S_DDP_BUF_TIMED_OUT    30
+#define	V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
+#define	F_DDP_BUF_TIMED_OUT    V_DDP_BUF_TIMED_OUT(1U)
+
+#define	S_DDP_INV    31
+#define	V_DDP_INV(x) ((x) << S_DDP_INV)
+#define	F_DDP_INV    V_DDP_INV(1U)
+
+struct cpl_rx_pkt {
+	RSS_HDR
+	__u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 iff:4;
+	__u8 csum_calc:1;
+	__u8 ipmi_pkt:1;
+	__u8 vlan_ex:1;
+	__u8 ip_frag:1;
+#else
+	__u8 ip_frag:1;
+	__u8 vlan_ex:1;
+	__u8 ipmi_pkt:1;
+	__u8 csum_calc:1;
+	__u8 iff:4;
+#endif
+	__be16 csum;
+	__be16 vlan;
+	__be16 len;
+	__be32 l2info;
+	__be16 hdr_len;
+	__be16 err_vec;
+};
+
+/* rx_pkt.l2info fields */
+#define	S_RX_ETHHDR_LEN    0
+#define	M_RX_ETHHDR_LEN    0x1F
+#define	V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
+#define	G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+
+#define	S_RX_PKTYPE    5
+#define	M_RX_PKTYPE    0x7
+#define	V_RX_PKTYPE(x) ((x) << S_RX_PKTYPE)
+#define	G_RX_PKTYPE(x) (((x) >> S_RX_PKTYPE) & M_RX_PKTYPE)
+
+#define	S_RX_MACIDX    8
+#define	M_RX_MACIDX    0x1FF
+#define	V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
+#define	G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
+
+#define	S_RX_DATYPE    18
+#define	M_RX_DATYPE    0x3
+#define	V_RX_DATYPE(x) ((x) << S_RX_DATYPE)
+#define	G_RX_DATYPE(x) (((x) >> S_RX_DATYPE) & M_RX_DATYPE)
+
+#define	S_RXF_PSH    20
+#define	V_RXF_PSH(x) ((x) << S_RXF_PSH)
+#define	F_RXF_PSH    V_RXF_PSH(1U)
+
+#define	S_RXF_SYN    21
+#define	V_RXF_SYN(x) ((x) << S_RXF_SYN)
+#define	F_RXF_SYN    V_RXF_SYN(1U)
+
+#define	S_RXF_UDP    22
+#define	V_RXF_UDP(x) ((x) << S_RXF_UDP)
+#define	F_RXF_UDP    V_RXF_UDP(1U)
+
+#define	S_RXF_TCP    23
+#define	V_RXF_TCP(x) ((x) << S_RXF_TCP)
+#define	F_RXF_TCP    V_RXF_TCP(1U)
+
+#define	S_RXF_IP    24
+#define	V_RXF_IP(x) ((x) << S_RXF_IP)
+#define	F_RXF_IP    V_RXF_IP(1U)
+
+#define	S_RXF_IP6    25
+#define	V_RXF_IP6(x) ((x) << S_RXF_IP6)
+#define	F_RXF_IP6    V_RXF_IP6(1U)
+
+#define	S_RXF_SYN_COOKIE    26
+#define	V_RXF_SYN_COOKIE(x) ((x) << S_RXF_SYN_COOKIE)
+#define	F_RXF_SYN_COOKIE    V_RXF_SYN_COOKIE(1U)
+
+#define	S_RXF_FCOE    26
+#define	V_RXF_FCOE(x) ((x) << S_RXF_FCOE)
+#define	F_RXF_FCOE    V_RXF_FCOE(1U)
+
+#define	S_RXF_LRO    27
+#define	V_RXF_LRO(x) ((x) << S_RXF_LRO)
+#define	F_RXF_LRO    V_RXF_LRO(1U)
+
+#define	S_RX_CHAN    28
+#define	M_RX_CHAN    0xF
+#define	V_RX_CHAN(x) ((x) << S_RX_CHAN)
+#define	G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+
+/* rx_pkt.hdr_len fields */
+#define	S_RX_TCPHDR_LEN    0
+#define	M_RX_TCPHDR_LEN    0x3F
+#define	V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
+#define	G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+
+#define	S_RX_IPHDR_LEN    6
+#define	M_RX_IPHDR_LEN    0x3FF
+#define	V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
+#define	G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+
+/* rx_pkt.err_vec fields */
+#define	S_RXERR_OR    0
+#define	V_RXERR_OR(x) ((x) << S_RXERR_OR)
+#define	F_RXERR_OR    V_RXERR_OR(1U)
+
+#define	S_RXERR_MAC    1
+#define	V_RXERR_MAC(x) ((x) << S_RXERR_MAC)
+#define	F_RXERR_MAC    V_RXERR_MAC(1U)
+
+#define	S_RXERR_IPVERS    2
+#define	V_RXERR_IPVERS(x) ((x) << S_RXERR_IPVERS)
+#define	F_RXERR_IPVERS    V_RXERR_IPVERS(1U)
+
+#define	S_RXERR_FRAG    3
+#define	V_RXERR_FRAG(x) ((x) << S_RXERR_FRAG)
+#define	F_RXERR_FRAG    V_RXERR_FRAG(1U)
+
+#define	S_RXERR_ATTACK    4
+#define	V_RXERR_ATTACK(x) ((x) << S_RXERR_ATTACK)
+#define	F_RXERR_ATTACK    V_RXERR_ATTACK(1U)
+
+#define	S_RXERR_ETHHDR_LEN    5
+#define	V_RXERR_ETHHDR_LEN(x) ((x) << S_RXERR_ETHHDR_LEN)
+#define	F_RXERR_ETHHDR_LEN    V_RXERR_ETHHDR_LEN(1U)
+
+#define	S_RXERR_IPHDR_LEN    6
+#define	V_RXERR_IPHDR_LEN(x) ((x) << S_RXERR_IPHDR_LEN)
+#define	F_RXERR_IPHDR_LEN    V_RXERR_IPHDR_LEN(1U)
+
+#define	S_RXERR_TCPHDR_LEN    7
+#define	V_RXERR_TCPHDR_LEN(x) ((x) << S_RXERR_TCPHDR_LEN)
+#define	F_RXERR_TCPHDR_LEN    V_RXERR_TCPHDR_LEN(1U)
+
+#define	S_RXERR_PKT_LEN    8
+#define	V_RXERR_PKT_LEN(x) ((x) << S_RXERR_PKT_LEN)
+#define	F_RXERR_PKT_LEN    V_RXERR_PKT_LEN(1U)
+
+#define	S_RXERR_TCP_OPT    9
+#define	V_RXERR_TCP_OPT(x) ((x) << S_RXERR_TCP_OPT)
+#define	F_RXERR_TCP_OPT    V_RXERR_TCP_OPT(1U)
+
+#define	S_RXERR_IPCSUM    12
+#define	V_RXERR_IPCSUM(x) ((x) << S_RXERR_IPCSUM)
+#define	F_RXERR_IPCSUM    V_RXERR_IPCSUM(1U)
+
+#define	S_RXERR_CSUM    13
+#define	V_RXERR_CSUM(x) ((x) << S_RXERR_CSUM)
+#define	F_RXERR_CSUM    V_RXERR_CSUM(1U)
+
+#define	S_RXERR_PING    14
+#define	V_RXERR_PING(x) ((x) << S_RXERR_PING)
+#define	F_RXERR_PING    V_RXERR_PING(1U)
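+
+/*
+ * Illustrative sketch (not part of the generated definitions): decoding
+ * the l2info and err_vec words of a received CPL_RX_PKT, where "rxp" is
+ * a hypothetical pointer to the message above.
+ *
+ *	uint32_t l2info = ntohl(rxp->l2info);
+ *	uint16_t err_vec = ntohs(rxp->err_vec);
+ *	uint32_t eth_hdr_len = G_RX_ETHHDR_LEN(l2info);
+ *	boolean_t is_tcp = (l2info & F_RXF_TCP) != 0;
+ *	boolean_t csum_bad =
+ *	    (err_vec & (F_RXERR_IPCSUM | F_RXERR_CSUM)) != 0;
+ */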
+
+struct cpl_trace_pkt {
+	RSS_HDR
+	__u8 opcode;
+	__u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 runt:4;
+	__u8 filter_hit:4;
+	__u8 :6;
+	__u8 err:1;
+	__u8 trunc:1;
+#else
+	__u8 filter_hit:4;
+	__u8 runt:4;
+	__u8 trunc:1;
+	__u8 err:1;
+	__u8 :6;
+#endif
+	__be16 rsvd;
+	__be16 len;
+	__be64 tstamp;
+};
+
+struct cpl_rte_delete_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+};
+
+/* {cpl_rte_delete_req, cpl_rte_read_req}.params fields */
+#define	S_RTE_REQ_LUT_IX    8
+#define	M_RTE_REQ_LUT_IX    0x7FF
+#define	V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
+#define	G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
+
+#define	S_RTE_REQ_LUT_BASE    19
+#define	M_RTE_REQ_LUT_BASE    0x7FF
+#define	V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
+#define	G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
+
+#define	S_RTE_READ_REQ_SELECT    31
+#define	V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
+#define	F_RTE_READ_REQ_SELECT    V_RTE_READ_REQ_SELECT(1U)
+
+struct cpl_rte_delete_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u32 write_sel;
+	__be32 lut_params;
+	__be32 l2t_idx;
+	__be32 netmask;
+	__be32 faddr;
+};
+
+/* cpl_rte_write_req.write_sel fields */
+#define	S_RTE_WR_L2TIDX    31
+#define	V_RTE_WR_L2TIDX(x) ((x) << S_RTE_WR_L2TIDX)
+#define	F_RTE_WR_L2TIDX    V_RTE_WR_L2TIDX(1U)
+
+#define	S_RTE_WR_FADDR    30
+#define	V_RTE_WR_FADDR(x) ((x) << S_RTE_WR_FADDR)
+#define	F_RTE_WR_FADDR    V_RTE_WR_FADDR(1U)
+
+/* cpl_rte_write_req.lut_params fields */
+#define	S_RTE_WR_LUT_IX    10
+#define	M_RTE_WR_LUT_IX    0x7FF
+#define	V_RTE_WR_LUT_IX(x) ((x) << S_RTE_WR_LUT_IX)
+#define	G_RTE_WR_LUT_IX(x) (((x) >> S_RTE_WR_LUT_IX) & M_RTE_WR_LUT_IX)
+
+#define	S_RTE_WR_LUT_BASE    21
+#define	M_RTE_WR_LUT_BASE    0x7FF
+#define	V_RTE_WR_LUT_BASE(x) ((x) << S_RTE_WR_LUT_BASE)
+#define	G_RTE_WR_LUT_BASE(x) (((x) >> S_RTE_WR_LUT_BASE) & M_RTE_WR_LUT_BASE)
+
+struct cpl_rte_write_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+};
+
+struct cpl_rte_read_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd;
+	__be16 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u32 :30;
+	__u32 select:1;
+#else
+	__u32 select:1;
+	__u32 :30;
+#endif
+	__be32 addr;
+};
+
+struct cpl_l2t_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 params;
+	__be16 l2t_idx;
+	__be16 vlan;
+	__u8   dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define	S_L2T_W_INFO    2
+#define	M_L2T_W_INFO    0x3F
+#define	V_L2T_W_INFO(x) ((x) << S_L2T_W_INFO)
+#define	G_L2T_W_INFO(x) (((x) >> S_L2T_W_INFO) & M_L2T_W_INFO)
+
+#define	S_L2T_W_PORT    8
+#define	M_L2T_W_PORT    0xF
+#define	V_L2T_W_PORT(x) ((x) << S_L2T_W_PORT)
+#define	G_L2T_W_PORT(x) (((x) >> S_L2T_W_PORT) & M_L2T_W_PORT)
+
+#define	S_L2T_W_NOREPLY    15
+#define	V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
+#define	F_L2T_W_NOREPLY    V_L2T_W_NOREPLY(1U)
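+
+/*
+ * Illustrative sketch (not part of the generated definitions): composing
+ * the params word of a CPL_L2T_WRITE_REQ; "port" and "sync" are
+ * hypothetical inputs.
+ *
+ *	req->params = htons(V_L2T_W_PORT(port) | V_L2T_W_NOREPLY(!sync));
+ */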
+
+struct cpl_l2t_write_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 :4;
+	__u8 iff:4;
+#else
+	__u8 iff:4;
+	__u8 :4;
+#endif
+	__be16 vlan;
+	__be16 info;
+	__u8 dst_mac[6];
+};
+
+struct cpl_smt_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+	__be16 pfvf1;
+	__u8   src_mac1[6];
+	__be16 pfvf0;
+	__u8   src_mac0[6];
+};
+
+/* cpl_smt_{read,write}_req.params fields */
+#define	S_SMTW_OVLAN_IDX    16
+#define	M_SMTW_OVLAN_IDX    0xF
+#define	V_SMTW_OVLAN_IDX(x) ((x) << S_SMTW_OVLAN_IDX)
+#define	G_SMTW_OVLAN_IDX(x) (((x) >> S_SMTW_OVLAN_IDX) & M_SMTW_OVLAN_IDX)
+
+#define	S_SMTW_IDX    20
+#define	M_SMTW_IDX    0x7F
+#define	V_SMTW_IDX(x) ((x) << S_SMTW_IDX)
+#define	G_SMTW_IDX(x) (((x) >> S_SMTW_IDX) & M_SMTW_IDX)
+
+#define	S_SMTW_NORPL    31
+#define	V_SMTW_NORPL(x) ((x) << S_SMTW_NORPL)
+#define	F_SMTW_NORPL    V_SMTW_NORPL(1U)
+
+/* cpl_smt_{read,write}_req.pfvf? fields */
+#define	S_SMTW_VF    0
+#define	M_SMTW_VF    0xFF
+#define	V_SMTW_VF(x) ((x) << S_SMTW_VF)
+#define	G_SMTW_VF(x) (((x) >> S_SMTW_VF) & M_SMTW_VF)
+
+#define	S_SMTW_PF    8
+#define	M_SMTW_PF    0x7
+#define	V_SMTW_PF(x) ((x) << S_SMTW_PF)
+#define	G_SMTW_PF(x) (((x) >> S_SMTW_PF) & M_SMTW_PF)
+
+#define	S_SMTW_VF_VLD    11
+#define	V_SMTW_VF_VLD(x) ((x) << S_SMTW_VF_VLD)
+#define	F_SMTW_VF_VLD    V_SMTW_VF_VLD(1U)
+
+struct cpl_smt_write_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+};
+
+struct cpl_smt_read_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8   status;
+	__u8   ovlan_idx;
+	__be16 rsvd;
+	__be16 pfvf1;
+	__u8   src_mac1[6];
+	__be16 pfvf0;
+	__u8   src_mac0[6];
+};
+
+struct cpl_barrier {
+	WR_HDR;
+	__u8 opcode;
+	__u8 chan_map;
+	__be16 rsvd0;
+	__be32 rsvd1;
+};
+
+/* cpl_barrier.chan_map fields */
+#define	S_CHAN_MAP    4
+#define	M_CHAN_MAP    0xF
+#define	V_CHAN_MAP(x) ((x) << S_CHAN_MAP)
+#define	G_CHAN_MAP(x) (((x) >> S_CHAN_MAP) & M_CHAN_MAP)
+
+struct cpl_error {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 error;
+};
+
+struct cpl_hit_notify {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 rsvd;
+	__be32 info;
+	__be32 reason;
+};
+
+struct cpl_pkt_notify {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 info;
+	__be32 reason;
+};
+
+/* cpl_{hit,pkt}_notify.info fields */
+#define	S_NTFY_MAC_IDX    0
+#define	M_NTFY_MAC_IDX    0x1FF
+#define	V_NTFY_MAC_IDX(x) ((x) << S_NTFY_MAC_IDX)
+#define	G_NTFY_MAC_IDX(x) (((x) >> S_NTFY_MAC_IDX) & M_NTFY_MAC_IDX)
+
+#define	S_NTFY_INTF    10
+#define	M_NTFY_INTF    0xF
+#define	V_NTFY_INTF(x) ((x) << S_NTFY_INTF)
+#define	G_NTFY_INTF(x) (((x) >> S_NTFY_INTF) & M_NTFY_INTF)
+
+#define	S_NTFY_TCPHDR_LEN    14
+#define	M_NTFY_TCPHDR_LEN    0xF
+#define	V_NTFY_TCPHDR_LEN(x) ((x) << S_NTFY_TCPHDR_LEN)
+#define	G_NTFY_TCPHDR_LEN(x) (((x) >> S_NTFY_TCPHDR_LEN) & M_NTFY_TCPHDR_LEN)
+
+#define	S_NTFY_IPHDR_LEN    18
+#define	M_NTFY_IPHDR_LEN    0x1FF
+#define	V_NTFY_IPHDR_LEN(x) ((x) << S_NTFY_IPHDR_LEN)
+#define	G_NTFY_IPHDR_LEN(x) (((x) >> S_NTFY_IPHDR_LEN) & M_NTFY_IPHDR_LEN)
+
+#define	S_NTFY_ETHHDR_LEN    27
+#define	M_NTFY_ETHHDR_LEN    0x1F
+#define	V_NTFY_ETHHDR_LEN(x) ((x) << S_NTFY_ETHHDR_LEN)
+#define	G_NTFY_ETHHDR_LEN(x) (((x) >> S_NTFY_ETHHDR_LEN) & M_NTFY_ETHHDR_LEN)
+
+struct cpl_rdma_terminate {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+};
+
+struct cpl_set_le_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 params;
+	__be64 mask_hi;
+	__be64 mask_lo;
+	__be64 val_hi;
+	__be64 val_lo;
+};
+
+/* cpl_set_le_req.reply_ctrl additional fields */
+#define	S_LE_REQ_IP6    13
+#define	V_LE_REQ_IP6(x) ((x) << S_LE_REQ_IP6)
+#define	F_LE_REQ_IP6    V_LE_REQ_IP6(1U)
+
+/* cpl_set_le_req.params fields */
+#define	S_LE_CHAN    0
+#define	M_LE_CHAN    0x3
+#define	V_LE_CHAN(x) ((x) << S_LE_CHAN)
+#define	G_LE_CHAN(x) (((x) >> S_LE_CHAN) & M_LE_CHAN)
+
+#define	S_LE_OFFSET    5
+#define	M_LE_OFFSET    0x7
+#define	V_LE_OFFSET(x) ((x) << S_LE_OFFSET)
+#define	G_LE_OFFSET(x) (((x) >> S_LE_OFFSET) & M_LE_OFFSET)
+
+#define	S_LE_MORE    8
+#define	V_LE_MORE(x) ((x) << S_LE_MORE)
+#define	F_LE_MORE    V_LE_MORE(1U)
+
+#define	S_LE_REQSIZE    9
+#define	M_LE_REQSIZE    0x7
+#define	V_LE_REQSIZE(x) ((x) << S_LE_REQSIZE)
+#define	G_LE_REQSIZE(x) (((x) >> S_LE_REQSIZE) & M_LE_REQSIZE)
+
+#define	S_LE_REQCMD    12
+#define	M_LE_REQCMD    0xF
+#define	V_LE_REQCMD(x) ((x) << S_LE_REQCMD)
+#define	G_LE_REQCMD(x) (((x) >> S_LE_REQCMD) & M_LE_REQCMD)
+
+struct cpl_set_le_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 chan;
+	__u8 info;
+	__be16 len;
+};
+
+/* cpl_set_le_rpl.info fields */
+#define	S_LE_RSPCMD    0
+#define	M_LE_RSPCMD    0xF
+#define	V_LE_RSPCMD(x) ((x) << S_LE_RSPCMD)
+#define	G_LE_RSPCMD(x) (((x) >> S_LE_RSPCMD) & M_LE_RSPCMD)
+
+#define	S_LE_RSPSIZE    4
+#define	M_LE_RSPSIZE    0x7
+#define	V_LE_RSPSIZE(x) ((x) << S_LE_RSPSIZE)
+#define	G_LE_RSPSIZE(x) (((x) >> S_LE_RSPSIZE) & M_LE_RSPSIZE)
+
+#define	S_LE_RSPTYPE    7
+#define	V_LE_RSPTYPE(x) ((x) << S_LE_RSPTYPE)
+#define	F_LE_RSPTYPE    V_LE_RSPTYPE(1U)
+
+struct cpl_sge_egr_update {
+	RSS_HDR
+	__be32 opcode_qid;
+	__be16 cidx;
+	__be16 pidx;
+};
+
+/* cpl_sge_egr_update.opcode_qid fields */
+#define	S_EGR_QID    0
+#define	M_EGR_QID    0x1FFFF
+#define	V_EGR_QID(x) ((x) << S_EGR_QID)
+#define	G_EGR_QID(x) (((x) >> S_EGR_QID) & M_EGR_QID)
+
+struct cpl_fw2_pld {
+	RSS_HDR
+	u8 opcode;
+	u8 rsvd[5];
+	__be16 len;
+};
+
+struct cpl_fw4_pld {
+	RSS_HDR
+	u8 opcode;
+	u8 rsvd0[3];
+	u8 type;
+	u8 rsvd1;
+	__be16 len;
+	__be64 data;
+	__be64 rsvd2;
+};
+
+struct cpl_fw6_pld {
+	RSS_HDR
+	u8 opcode;
+	u8 rsvd[5];
+	__be16 len;
+	__be64 data[4];
+};
+
+struct cpl_fw2_msg {
+	RSS_HDR
+	union opcode_info oi;
+};
+
+struct cpl_fw4_msg {
+	RSS_HDR
+	u8 opcode;
+	u8 type;
+	__be16 rsvd0;
+	__be32 rsvd1;
+	__be64 data[2];
+};
+
+struct cpl_fw4_ack {
+	RSS_HDR
+	union opcode_tid ot;
+	u8 credits;
+	u8 rsvd0[2];
+	u8 flags;
+	__be32 snd_nxt;
+	__be32 snd_una;
+	__be64 rsvd1;
+};
+
+enum {
+	CPL_FW4_ACK_FLAGS_SEQVAL	= 0x1,  /* seqn valid */
+	CPL_FW4_ACK_FLAGS_CH		= 0x2,  /* channel change complete */
+	CPL_FW4_ACK_FLAGS_FLOWC		= 0x4,  /* fw_flowc_wr complete */
+};
+
+struct cpl_fw6_msg {
+	RSS_HDR
+	u8 opcode;
+	u8 type;
+	__be16 rsvd0;
+	__be32 rsvd1;
+	__be64 data[4];
+};
+
+/* cpl_fw6_msg.type values */
+enum {
+	FW6_TYPE_CMD_RPL = 0,
+};
+
+/* ULP_TX opcodes */
+enum {
+	ULP_TX_MEM_READ = 2,
+	ULP_TX_MEM_WRITE = 3,
+	ULP_TX_PKT = 4
+};
+
+enum {
+	ULP_TX_SC_NOOP = 0x80,
+	ULP_TX_SC_IMM  = 0x81,
+	ULP_TX_SC_DSGL = 0x82,
+	ULP_TX_SC_ISGL = 0x83
+};
+
+#define	S_ULPTX_CMD    24
+#define	M_ULPTX_CMD    0xFF
+#define	V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define	S_ULPTX_LEN16    0
+#define	M_ULPTX_LEN16    0xFF
+#define	V_ULPTX_LEN16(x) ((x) << S_ULPTX_LEN16)
+
+#define	S_ULP_TX_SC_MORE    23
+#define	V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE)
+#define	F_ULP_TX_SC_MORE    V_ULP_TX_SC_MORE(1U)
+
+struct ulptx_sge_pair {
+	__be32 len[2];
+	__be64 addr[2];
+};
+
+struct ulptx_sgl {
+	__be32 cmd_nsge;
+	__be32 len0;
+	__be64 addr0;
+#if !(defined C99_NOT_SUPPORTED)
+	struct ulptx_sge_pair sge[];
+#endif
+};
+
+struct ulptx_isge {
+	__be32 stag;
+	__be32 len;
+	__be64 target_ofst;
+};
+
+struct ulptx_isgl {
+	__be32 cmd_nisge;
+	__be32 rsvd;
+#if !(defined C99_NOT_SUPPORTED)
+	struct ulptx_isge sge[];
+#endif
+};
+
+struct ulptx_idata {
+	__be32 cmd_more;
+	__be32 len;
+};
+
+#define	S_ULPTX_NSGE    0
+#define	M_ULPTX_NSGE    0xFFFF
+#define	V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
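+
+/*
+ * Illustrative sketch (not part of the generated definitions): the
+ * cmd_nsge word of a ulptx_sgl combines the ULP_TX sub-command with the
+ * total number of scatter-gather entries; "nsegs" is hypothetical.
+ *
+ *	sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ *	    V_ULPTX_NSGE(nsegs));
+ */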
+
+struct ulp_mem_io {
+	WR_HDR;
+	__be32 cmd;
+	__be32 len16;		/* command length */
+	__be32 dlen;		/* data length in 32-byte units */
+	__be32 lock_addr;
+};
+
+/* additional ulp_mem_io.cmd fields */
+#define	S_ULP_MEMIO_ORDER    23
+#define	V_ULP_MEMIO_ORDER(x) ((x) << S_ULP_MEMIO_ORDER)
+#define	F_ULP_MEMIO_ORDER    V_ULP_MEMIO_ORDER(1U)
+
+/* ulp_mem_io.lock_addr fields */
+#define	S_ULP_MEMIO_ADDR	0
+#define	M_ULP_MEMIO_ADDR	0x7FFFFFF
+#define	V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
+
+#define	S_ULP_MEMIO_LOCK	31
+#define	V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
+#define	F_ULP_MEMIO_LOCK    V_ULP_MEMIO_LOCK(1U)
+
+/* ulp_mem_io.dlen fields */
+#define	S_ULP_MEMIO_DATA_LEN	0
+#define	M_ULP_MEMIO_DATA_LEN	0x1F
+#define	V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
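+
+/*
+ * Illustrative sketch (not part of the generated definitions):
+ * programming a ulp_mem_io request, assuming "len" is a byte count that
+ * is a multiple of 32 and "addr" is already in the units the hardware
+ * expects.
+ *
+ *	req->dlen = htonl(V_ULP_MEMIO_DATA_LEN(len / 32));
+ *	req->lock_addr = htonl(F_ULP_MEMIO_LOCK | V_ULP_MEMIO_ADDR(addr));
+ */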
+
+struct ulp_txpkt {
+	__be32 cmd_dest;
+	__be32 len;
+};
+
+/* ulp_txpkt.cmd_dest fields */
+#define	S_ULP_TXPKT_DEST	16
+#define	M_ULP_TXPKT_DEST	0x3
+#define	V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+
+#define	S_ULP_TXPKT_FID		4
+#define	M_ULP_TXPKT_FID		0x7FF
+#define	V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
+
+#endif /* __CXGBE_T4_MSG_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/cxgbe/common/t4_regs.h	Thu May 23 09:51:05 2013 -0400
@@ -0,0 +1,24123 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * This file is part of the Chelsio T4 support code.
+ *
+ * Copyright (C) 2003-2013 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+/* This file was automatically generated --- changes will be lost */
+
+#ifndef _CXGBE_T4_REGS_H
+#define	_CXGBE_T4_REGS_H
+
+#define	MYPF_BASE 0x1b000
+#define	MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define	PF0_BASE 0x1e000
+#define	PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define	PF1_BASE 0x1e400
+#define	PF1_REG(reg_addr) (PF1_BASE + (reg_addr))
+
+#define	PF2_BASE 0x1e800
+#define	PF2_REG(reg_addr) (PF2_BASE + (reg_addr))
+
+#define	PF3_BASE 0x1ec00
+#define	PF3_REG(reg_addr) (PF3_BASE + (reg_addr))
+
+#define	PF4_BASE 0x1f000
+#define	PF4_REG(reg_addr) (PF4_BASE + (reg_addr))
+
+#define	PF5_BASE 0x1f400
+#define	PF5_REG(reg_addr) (PF5_BASE + (reg_addr))
+
+#define	PF6_BASE 0x1f800
+#define	PF6_REG(reg_addr) (PF6_BASE + (reg_addr))
+
+#define	PF7_BASE 0x1fc00
+#define	PF7_REG(reg_addr) (PF7_BASE + (reg_addr))
+
+#define	PF_STRIDE 0x400
+#define	PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define	PF_REG(idx, reg) (PF_BASE(idx) + (reg))
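+
+/*
+ * For example, PF_BASE(3) = 0x1e000 + 3 * 0x400 = 0x1ec00, which matches
+ * PF3_BASE above, so PF_REG(3, reg) and PF3_REG(reg) name the same
+ * register.
+ */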
+
+#define	MYPORT_BASE 0x1c000
+#define	MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define	PORT0_BASE 0x20000
+#define	PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define	PORT1_BASE 0x22000
+#define	PORT1_REG(reg_addr) (PORT1_BASE + (reg_addr))
+
+#define	PORT2_BASE 0x24000
+#define	PORT2_REG(reg_addr) (PORT2_BASE + (reg_addr))
+
+#define	PORT3_BASE 0x26000
+#define	PORT3_REG(reg_addr) (PORT3_BASE + (reg_addr))
+
+#define	PORT_STRIDE 0x2000
+#define	PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define	PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define	VF_SGE_BASE 0x0
+#define	VF_SGE_REG(reg_addr) (VF_SGE_BASE + (reg_addr))
+
+#define	VF_MPS_BASE 0x100
+#define	VF_MPS_REG(reg_addr) (VF_MPS_BASE + (reg_addr))
+
+#define	VF_PL_BASE 0x200
+#define	VF_PL_REG(reg_addr) (VF_PL_BASE + (reg_addr))
+
+#define	VF_MBDATA_BASE 0x240
+#define	VF_MBDATA_REG(reg_addr) (VF_MBDATA_BASE + (reg_addr))
+
+#define	VF_CIM_BASE 0x300
+#define	VF_CIM_REG(reg_addr) (VF_CIM_BASE + (reg_addr))
+
+#define	EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
+#define	EDC_REG(reg, idx) ((reg) + EDC_STRIDE * (idx))
+
+#define	SGE_QUEUE_BASE_MAP_HIGH(idx) (A_SGE_QUEUE_BASE_MAP_HIGH + (idx) * 8)
+#define	NUM_SGE_QUEUE_BASE_MAP_HIGH_INSTANCES 136
+
+#define	SGE_QUEUE_BASE_MAP_LOW(idx) (A_SGE_QUEUE_BASE_MAP_LOW + (idx) * 8)
+#define	NUM_SGE_QUEUE_BASE_MAP_LOW_INSTANCES 136
+
+#define	PCIE_DMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define	NUM_PCIE_DMA_INSTANCES 4
+
+#define	PCIE_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define	NUM_PCIE_CMD_INSTANCES 2
+
+#define	PCIE_HMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define	NUM_PCIE_HMA_INSTANCES 1
+
+#define	PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define	NUM_PCIE_MEM_ACCESS_INSTANCES 8
+
+#define	PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define	NUM_PCIE_MAILBOX_INSTANCES 1
+
+#define	PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define	NUM_PCIE_FW_INSTANCES 8
+
+#define	PCIE_FUNC_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define	NUM_PCIE_FUNC_INSTANCES 256
+
+#define	PCIE_FID(idx) (A_PCIE_FID + (idx) * 4)
+#define	NUM_PCIE_FID_INSTANCES 2048
+
+#define	PCIE_DMA_BUF_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define	NUM_PCIE_DMA_BUF_INSTANCES 4
+
+#define	MC_DDR3PHYDATX8_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define	NUM_MC_DDR3PHYDATX8_INSTANCES 9
+
+#define	MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define	NUM_MC_BIST_STATUS_INSTANCES 18
+
+#define	EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define	NUM_EDC_BIST_STATUS_INSTANCES 18
+
+#define	CIM_PF_MAILBOX_DATA(idx) (A_CIM_PF_MAILBOX_DATA + (idx) * 4)
+#define	NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16
+
+#define	MPS_TRC_FILTER_MATCH_CTL_A(idx) \
+	(A_MPS_TRC_FILTER_MATCH_CTL_A + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER_MATCH_CTL_A_INSTANCES 4
+
+#define	MPS_TRC_FILTER_MATCH_CTL_B(idx) \
+	(A_MPS_TRC_FILTER_MATCH_CTL_B + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER_MATCH_CTL_B_INSTANCES 4
+
+#define	MPS_TRC_FILTER_RUNT_CTL(idx) (A_MPS_TRC_FILTER_RUNT_CTL + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER_RUNT_CTL_INSTANCES 4
+
+#define	MPS_TRC_FILTER_DROP(idx) (A_MPS_TRC_FILTER_DROP + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER_DROP_INSTANCES 4
+
+#define	MPS_TRC_FILTER0_MATCH(idx) (A_MPS_TRC_FILTER0_MATCH + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER0_MATCH_INSTANCES 28
+
+#define	MPS_TRC_FILTER0_DONT_CARE(idx) (A_MPS_TRC_FILTER0_DONT_CARE + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER0_DONT_CARE_INSTANCES 28
+
+#define	MPS_TRC_FILTER1_MATCH(idx) (A_MPS_TRC_FILTER1_MATCH + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER1_MATCH_INSTANCES 28
+
+#define	MPS_TRC_FILTER1_DONT_CARE(idx) (A_MPS_TRC_FILTER1_DONT_CARE + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER1_DONT_CARE_INSTANCES 28
+
+#define	MPS_TRC_FILTER2_MATCH(idx) (A_MPS_TRC_FILTER2_MATCH + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER2_MATCH_INSTANCES 28
+
+#define	MPS_TRC_FILTER2_DONT_CARE(idx) (A_MPS_TRC_FILTER2_DONT_CARE + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER2_DONT_CARE_INSTANCES 28
+
+#define	MPS_TRC_FILTER3_MATCH(idx) (A_MPS_TRC_FILTER3_MATCH + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER3_MATCH_INSTANCES 28
+
+#define	MPS_TRC_FILTER3_DONT_CARE(idx) (A_MPS_TRC_FILTER3_DONT_CARE + (idx) * 4)
+#define	NUM_MPS_TRC_FILTER3_DONT_CARE_INSTANCES 28
+
+#define	MPS_PORT_CLS_HASH_SRAM(idx) (A_MPS_PORT_CLS_HASH_SRAM + (idx) * 4)
+#define	NUM_MPS_PORT_CLS_HASH_SRAM_INSTANCES 65
+
+#define	MPS_CLS_VLAN_TABLE(idx) (A_MPS_CLS_VLAN_TABLE + (idx) * 4)
+#define	NUM_MPS_CLS_VLAN_TABLE_INSTANCES 9
+
+#define	MPS_CLS_SRAM_L(idx) (A_MPS_CLS_SRAM_L + (idx) * 8)
+#define	NUM_MPS_CLS_SRAM_L_INSTANCES 336
+
+#define	MPS_CLS_SRAM_H(idx) (A_MPS_CLS_SRAM_H + (idx) * 8)
+#define	NUM_MPS_CLS_SRAM_H_INSTANCES 336
+
+#define	MPS_CLS_TCAM_Y_L(idx) (A_MPS_CLS_TCAM_Y_L + (idx) * 16)
+#define	NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
+
+#define	MPS_CLS_TCAM_Y_H(idx) (A_MPS_CLS_TCAM_Y_H + (idx) * 16)
+#define	NUM_MPS_CLS_TCAM_Y_H_INSTANCES 512
+
+#define	MPS_CLS_TCAM_X_L(idx) (A_MPS_CLS_TCAM_X_L + (idx) * 16)
+#define	NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
+
+#define	MPS_CLS_TCAM_X_H(idx) (A_MPS_CLS_TCAM_X_H + (idx) * 16)
+#define	NUM_MPS_CLS_TCAM_X_H_INSTANCES 512
+
+#define	PL_SEMAPHORE_LOCK(idx) (A_PL_SEMAPHORE_LOCK + (idx) * 4)
+#define	NUM_PL_SEMAPHORE_LOCK_INSTANCES 8
+
+#define	PL_VF_SLICE_L(idx) (A_PL_VF_SLICE_L + (idx) * 8)
+#define	NUM_PL_VF_SLICE_L_INSTANCES 8
+
+#define	PL_VF_SLICE_H(idx) (A_PL_VF_SLICE_H + (idx) * 8)
+#define	NUM_PL_VF_SLICE_H_INSTANCES 8
+
+#define	PL_FLR_VF_STATUS(idx) (A_PL_FLR_VF_STATUS + (idx) * 4)
+#define	NUM_PL_FLR_VF_STATUS_INSTANCES 4
+
+#define	PL_VFID_MAP(idx) (A_PL_VFID_MAP + (idx) * 4)
+#define	NUM_PL_VFID_MAP_INSTANCES 256
+
+#define	LE_DB_MASK_IPV4(idx) (A_LE_DB_MASK_IPV4 + (idx) * 4)
+#define	NUM_LE_DB_MASK_IPV4_INSTANCES 17
+
+#define	LE_DB_MASK_IPV6(idx) (A_LE_DB_MASK_IPV6 + (idx) * 4)
+#define	NUM_LE_DB_MASK_IPV6_INSTANCES 17
+
+#define	LE_DB_DBGI_REQ_DATA(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4)
+#define	NUM_LE_DB_DBGI_REQ_DATA_INSTANCES 17
+
+#define	LE_DB_DBGI_REQ_MASK(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
+#define	NUM_LE_DB_DBGI_REQ_MASK_INSTANCES 17
+
+#define	LE_DB_DBGI_RSP_DATA(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4)
+#define	NUM_LE_DB_DBGI_RSP_DATA_INSTANCES 17
+
+#define	LE_DB_ACTIVE_MASK_IPV4(idx) (A_LE_DB_ACTIVE_MASK_IPV4 + (idx) * 4)
+#define	NUM_LE_DB_ACTIVE_MASK_IPV4_INSTANCES 17
+
+#define	LE_DB_ACTIVE_MASK_IPV6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
+#define	NUM_LE_DB_ACTIVE_MASK_IPV6_INSTANCES 17
+
+#define	LE_HASH_MASK_GEN_IPV4(idx) (A_LE_HASH_MASK_GEN_IPV4 + (idx) * 4)
+#define	NUM_LE_HASH_MASK_GEN_IPV4_INSTANCES 4
+
+#define	LE_HASH_MASK_GEN_IPV6(idx) (A_LE_HASH_MASK_GEN_IPV6 + (idx) * 4)
+#define	NUM_LE_HASH_MASK_GEN_IPV6_INSTANCES 12
+
+#define	LE_HASH_MASK_CMP_IPV4(idx) (A_LE_HASH_MASK_CMP_IPV4 + (idx) * 4)
+#define	NUM_LE_HASH_MASK_CMP_IPV4_INSTANCES 4
+
+#define	LE_HASH_MASK_CMP_IPV6(idx) (A_LE_HASH_MASK_CMP_IPV6 + (idx) * 4)
+#define	NUM_LE_HASH_MASK_CMP_IPV6_INSTANCES 12
+
+#define	UP_TSCH_CHANNEL_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define	NUM_UP_TSCH_CHANNEL_INSTANCES 4
+
+#define	CIM_CTL_MAILBOX_VF_STATUS(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4)
+#define	NUM_CIM_CTL_MAILBOX_VF_STATUS_INSTANCES 4
+
+#define	CIM_CTL_MAILBOX_VFN_CTL(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 16)
+#define	NUM_CIM_CTL_MAILBOX_VFN_CTL_INSTANCES 128
+
+#define	CIM_CTL_TSCH_CHANNEL_REG(reg_addr, idx) ((reg_addr) + (idx) * 288)
+#define	NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+
+#define	CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_REG(reg_addr, idx) \
+	((reg_addr) + (idx) * 16)
+#define	NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
+/* registers for module SGE */
+#define	SGE_BASE_ADDR 0x1000
+
+#define	A_SGE_PF_KDOORBELL 0x0
+
+#define	S_QID    15
+#define	M_QID    0x1ffffU
+#define	V_QID(x) ((x) << S_QID)
+#define	G_QID(x) (((x) >> S_QID) & M_QID)
+
+#define	S_DBPRIO    14
+#define	V_DBPRIO(x) ((x) << S_DBPRIO)
+#define	F_DBPRIO    V_DBPRIO(1U)
+
+#define	S_PIDX    0
+#define	M_PIDX    0x3fffU
+#define	V_PIDX(x) ((x) << S_PIDX)
+#define	G_PIDX(x) (((x) >> S_PIDX) & M_PIDX)
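+
+/*
+ * Illustrative sketch (not part of the generated definitions): ringing
+ * the kernel doorbell after posting "n" new descriptors to the egress
+ * queue with context id "qid", assuming the common code's t4_write_reg()
+ * helper.
+ *
+ *	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ *	    V_QID(qid) | V_PIDX(n));
+ */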
+
+#define	A_SGE_VF_KDOORBELL 0x0
+#define	A_SGE_PF_GTS 0x4
+
+#define	S_INGRESSQID    16
+#define	M_INGRESSQID    0xffffU
+#define	V_INGRESSQID(x) ((x) << S_INGRESSQID)
+#define	G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID)
+
+#define	S_TIMERREG    13
+#define	M_TIMERREG    0x7U
+#define	V_TIMERREG(x) ((x) << S_TIMERREG)
+#define	G_TIMERREG(x) (((x) >> S_TIMERREG) & M_TIMERREG)
+
+#define	S_SEINTARM    12
+#define	V_SEINTARM(x) ((x) << S_SEINTARM)
+#define	F_SEINTARM    V_SEINTARM(1U)
+
+#define	S_CIDXINC    0
+#define	M_CIDXINC    0xfffU
+#define	V_CIDXINC(x) ((x) << S_CIDXINC)
+#define	G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC)
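+
+/*
+ * Illustrative sketch (not part of the generated definitions): updating
+ * an ingress queue's GTS register to return "credits" consumed entries,
+ * select holdoff timer "tmr", and re-arm the interrupt; names are
+ * hypothetical.
+ *
+ *	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ *	    V_INGRESSQID(qid) | V_TIMERREG(tmr) | F_SEINTARM |
+ *	    V_CIDXINC(credits));
+ */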
+
+#define	A_SGE_VF_GTS 0x4
+#define	A_SGE_CONTROL 0x1008
+
+#define	S_IGRALLCPLTOFL    31
+#define	V_IGRALLCPLTOFL(x) ((x) << S_IGRALLCPLTOFL)
+#define	F_IGRALLCPLTOFL    V_IGRALLCPLTOFL(1U)
+
+#define	S_FLSPLITMIN    22
+#define	M_FLSPLITMIN    0x1ffU
+#define	V_FLSPLITMIN(x) ((x) << S_FLSPLITMIN)
+#define	G_FLSPLITMIN(x) (((x) >> S_FLSPLITMIN) & M_FLSPLITMIN)
+
+#define	S_FLSPLITMODE    20
+#define	M_FLSPLITMODE    0x3U
+#define	V_FLSPLITMODE(x) ((x) << S_FLSPLITMODE)
+#define	G_FLSPLITMODE(x) (((x) >> S_FLSPLITMODE) & M_FLSPLITMODE)
+
+#define	S_DCASYSTYPE    19
+#define	V_DCASYSTYPE(x) ((x) << S_DCASYSTYPE)
+#define	F_DCASYSTYPE    V_DCASYSTYPE(1U)
+
+#define	S_RXPKTCPLMODE    18
+#define	V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE)
+#define	F_RXPKTCPLMODE    V_RXPKTCPLMODE(1U)
+
+#define	S_EGRSTATUSPAGESIZE    17
+#define	V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE)
+#define	F_EGRSTATUSPAGESIZE    V_EGRSTATUSPAGESIZE(1U)
+
+#define	S_INGHINTENABLE1    15
+#define	V_INGHINTENABLE1(x) ((x) << S_INGHINTENABLE1)
+#define	F_INGHINTENABLE1    V_INGHINTENABLE1(1U)
+
+#define	S_INGHINTENABLE0    14
+#define	V_INGHINTENABLE0(x) ((x) << S_INGHINTENABLE0)
+#define	F_INGHINTENABLE0    V_INGHINTENABLE0(1U)
+
+#define	S_INGINTCOMPAREIDX    13
+#define	V_INGINTCOMPAREIDX(x) ((x) << S_INGINTCOMPAREIDX)
+#define	F_INGINTCOMPAREIDX    V_INGINTCOMPAREIDX(1U)
+
+#define	S_PKTSHIFT    10
+#define	M_PKTSHIFT    0x7U
+#define	V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+#define	G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT)
+
+#define	S_INGPCIEBOUNDARY    7
+#define	M_INGPCIEBOUNDARY    0x7U
+#define	V_INGPCIEBOUNDARY(x) ((x) << S_INGPCIEBOUNDARY)
+#define	G_INGPCIEBOUNDARY(x) (((x) >> S_INGPCIEBOUNDARY) & M_INGPCIEBOUNDARY)
+
+#define	S_INGPADBOUNDARY    4
+#define	M_INGPADBOUNDARY    0x7U
+#define	V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY)
+#define	G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY)
+
+#define	S_EGRPCIEBOUNDARY    1
+#define	M_EGRPCIEBOUNDARY    0x7U
+#define	V_EGRPCIEBOUNDARY(x) ((x) << S_EGRPCIEBOUNDARY)
+#define	G_EGRPCIEBOUNDARY(x) (((x) >> S_EGRPCIEBOUNDARY) & M_EGRPCIEBOUNDARY)
+
+#define	S_GLOBALENABLE    0
+#define	V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
+#define	F_GLOBALENABLE    V_GLOBALENABLE(1U)
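+
+/*
+ * Illustrative sketch (not part of the generated definitions): deriving
+ * the free-list alignment from SGE_CONTROL.  The "+ 5" bias (a 32-byte
+ * minimum boundary) reflects common driver usage and is an assumption,
+ * not something this header states.
+ *
+ *	uint32_t ctrl = t4_read_reg(sc, A_SGE_CONTROL);
+ *	int fl_align = 1 << (G_INGPADBOUNDARY(ctrl) + 5);
+ */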
+
+#define	A_SGE_HOST_PAGE_SIZE 0x100c
+
+#define	S_HOSTPAGESIZEPF7    28
+#define	M_HOSTPAGESIZEPF7    0xfU
+#define	V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7)
+#define	G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7)
+
+#define	S_HOSTPAGESIZEPF6    24
+#define	M_HOSTPAGESIZEPF6    0xfU
+#define	V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6)
+#define	G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6)
+
+#define	S_HOSTPAGESIZEPF5    20
+#define	M_HOSTPAGESIZEPF5    0xfU
+#define	V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5)
+#define	G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5)
+
+#define	S_HOSTPAGESIZEPF4    16
+#define	M_HOSTPAGESIZEPF4    0xfU
+#define	V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4)
+#define	G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4)
+
+#define	S_HOSTPAGESIZEPF3    12
+#define	M_HOSTPAGESIZEPF3    0xfU
+#define	V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3)
+#define	G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3)
+
+#define	S_HOSTPAGESIZEPF2    8
+#define	M_HOSTPAGESIZEPF2    0xfU
+#define	V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2)
+#define	G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2)
+
+#define	S_HOSTPAGESIZEPF1    4
+#define	M_HOSTPAGESIZEPF1    0xfU
+#define	V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1)
+#define	G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1)
+
+#define	S_HOSTPAGESIZEPF0    0
+#define	M_HOSTPAGESIZEPF0    0xfU
+#define	V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0)
+#define	G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0)
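+
+/*
+ * The HOSTPAGESIZEPFn fields are typically programmed with
+ * log2(host page size) - 10 (so 0x2 for 4 KB pages); this reflects
+ * common driver usage and is an assumption, not something this header
+ * states.
+ *
+ *	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE,
+ *	    V_HOSTPAGESIZEPF0(PAGESHIFT - 10));
+ */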
+
+#define	A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+
+#define	S_QUEUESPERPAGEPF7    28
+#define	M_QUEUESPERPAGEPF7    0xfU
+#define	V_QUEUESPERPAGEPF7(x) ((x) << S_QUEUESPERPAGEPF7)
+#define	G_QUEUESPERPAGEPF7(x) (((x) >> S_QUEUESPERPAGEPF7) & M_QUEUESPERPAGEPF7)
+
+#define	S_QUEUESPERPAGEPF6    24
+#define	M_QUEUESPERPAGEPF6    0xfU
+#define	V_QUEUESPERPAGEPF6(x) ((x) << S_QUEUESPERPAGEPF6)
+#define	G_QUEUESPERPAGEPF6(x) (((x) >> S_QUEUESPERPAGEPF6) & M_QUEUESPERPAGEPF6)
+
+#define	S_QUEUESPERPAGEPF5    20
+#define	M_QUEUESPERPAGEPF5    0xfU
+#define	V_QUEUESPERPAGEPF5(x) ((x) << S_QUEUESPERPAGEPF5)
+#define	G_QUEUESPERPAGEPF5(x) (((x) >> S_QUEUESPERPAGEPF5) & M_QUEUESPERPAGEPF5)
+
+#define	S_QUEUESPERPAGEPF4    16
+#define	M_QUEUESPERPAGEPF4    0xfU
+#define	V_QUEUESPERPAGEPF4(x) ((x) << S_QUEUESPERPAGEPF4)
+#define	G_QUEUESPERPAGEPF4(x) (((x) >> S_QUEUESPERPAGEPF4) & M_QUEUESPERPAGEPF4)
+
+#define	S_QUEUESPERPAGEPF3    12
+#define	M_QUEUESPERPAGEPF3    0xfU
+#define	V_QUEUESPERPAGEPF3(x) ((x) << S_QUEUESPERPAGEPF3)
+#define	G_QUEUESPERPAGEPF3(x) (((x) >> S_QUEUESPERPAGEPF3) & M_QUEUESPERPAGEPF3)
+
+#define	S_QUEUESPERPAGEPF2    8
+#define	M_QUEUESPERPAGEPF2    0xfU
+#define	V_QUEUESPERPAGEPF2(x) ((x) << S_QUEUESPERPAGEPF2)
+#define	G_QUEUESPERPAGEPF2(x) (((x) >> S_QUEUESPERPAGEPF2) & M_QUEUESPERPAGEPF2)
+
+#define	S_QUEUESPERPAGEPF1    4
+#define	M_QUEUESPERPAGEPF1    0xfU
+#define	V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1)
+#define	G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1)
+
+#define	S_QUEUESPERPAGEPF0    0
+#define	M_QUEUESPERPAGEPF0    0xfU
+#define	V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
+#define	G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+
+#define	A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
+#define	S_QUEUESPERPAGEVFPF7    28
+#define	M_QUEUESPERPAGEVFPF7    0xfU
+#define	V_QUEUESPERPAGEVFPF7(x) ((x) << S_QUEUESPERPAGEVFPF7)
+#define	G_QUEUESPERPAGEVFPF7(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF7) & M_QUEUESPERPAGEVFPF7)
+
+#define	S_QUEUESPERPAGEVFPF6    24
+#define	M_QUEUESPERPAGEVFPF6    0xfU
+#define	V_QUEUESPERPAGEVFPF6(x) ((x) << S_QUEUESPERPAGEVFPF6)
+#define	G_QUEUESPERPAGEVFPF6(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF6) & M_QUEUESPERPAGEVFPF6)
+
+#define	S_QUEUESPERPAGEVFPF5    20
+#define	M_QUEUESPERPAGEVFPF5    0xfU
+#define	V_QUEUESPERPAGEVFPF5(x) ((x) << S_QUEUESPERPAGEVFPF5)
+#define	G_QUEUESPERPAGEVFPF5(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF5) & M_QUEUESPERPAGEVFPF5)
+
+#define	S_QUEUESPERPAGEVFPF4    16
+#define	M_QUEUESPERPAGEVFPF4    0xfU
+#define	V_QUEUESPERPAGEVFPF4(x) ((x) << S_QUEUESPERPAGEVFPF4)
+#define	G_QUEUESPERPAGEVFPF4(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF4) & M_QUEUESPERPAGEVFPF4)
+
+#define	S_QUEUESPERPAGEVFPF3    12
+#define	M_QUEUESPERPAGEVFPF3    0xfU
+#define	V_QUEUESPERPAGEVFPF3(x) ((x) << S_QUEUESPERPAGEVFPF3)
+#define	G_QUEUESPERPAGEVFPF3(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF3) & M_QUEUESPERPAGEVFPF3)
+
+#define	S_QUEUESPERPAGEVFPF2    8
+#define	M_QUEUESPERPAGEVFPF2    0xfU
+#define	V_QUEUESPERPAGEVFPF2(x) ((x) << S_QUEUESPERPAGEVFPF2)
+#define	G_QUEUESPERPAGEVFPF2(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF2) & M_QUEUESPERPAGEVFPF2)
+
+#define	S_QUEUESPERPAGEVFPF1    4
+#define	M_QUEUESPERPAGEVFPF1    0xfU
+#define	V_QUEUESPERPAGEVFPF1(x) ((x) << S_QUEUESPERPAGEVFPF1)
+#define	G_QUEUESPERPAGEVFPF1(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF1) & M_QUEUESPERPAGEVFPF1)
+
+#define	S_QUEUESPERPAGEVFPF0    0
+#define	M_QUEUESPERPAGEVFPF0    0xfU
+#define	V_QUEUESPERPAGEVFPF0(x) ((x) << S_QUEUESPERPAGEVFPF0)
+#define	G_QUEUESPERPAGEVFPF0(x) \
+	(((x) >> S_QUEUESPERPAGEVFPF0) & M_QUEUESPERPAGEVFPF0)
+
+#define	A_SGE_USER_MODE_LIMITS 0x1018
+
+#define	S_OPCODE_MIN    24
+#define	M_OPCODE_MIN    0xffU
+#define	V_OPCODE_MIN(x) ((x) << S_OPCODE_MIN)
+#define	G_OPCODE_MIN(x) (((x) >> S_OPCODE_MIN) & M_OPCODE_MIN)
+
+#define	S_OPCODE_MAX    16
+#define	M_OPCODE_MAX    0xffU
+#define	V_OPCODE_MAX(x) ((x) << S_OPCODE_MAX)
+#define	G_OPCODE_MAX(x) (((x) >> S_OPCODE_MAX) & M_OPCODE_MAX)
+
+#define	S_LENGTH_MIN    8
+#define	M_LENGTH_MIN    0xffU
+#define	V_LENGTH_MIN(x) ((x) << S_LENGTH_MIN)
+#define	G_LENGTH_MIN(x) (((x) >> S_LENGTH_MIN) & M_LENGTH_MIN)
+
+#define	S_LENGTH_MAX    0
+#define	M_LENGTH_MAX    0xffU
+#define	V_LENGTH_MAX(x) ((x) << S_LENGTH_MAX)
+#define	G_LENGTH_MAX(x) (((x) >> S_LENGTH_MAX) & M_LENGTH_MAX)
+
+#define	A_SGE_WR_ERROR 0x101c
+
+#define	S_WR_ERROR_OPCODE    0
+#define	M_WR_ERROR_OPCODE    0xffU
+#define	V_WR_ERROR_OPCODE(x) ((x) << S_WR_ERROR_OPCODE)
+#define	G_WR_ERROR_OPCODE(x) (((x) >> S_WR_ERROR_OPCODE) & M_WR_ERROR_OPCODE)
+
+#define	A_SGE_PERR_INJECT 0x1020
+
+#define	S_MEMSEL    1
+#define	M_MEMSEL    0x1fU
+#define	V_MEMSEL(x) ((x) << S_MEMSEL)
+#define	G_MEMSEL(x) (((x) >> S_MEMSEL) & M_MEMSEL)
+
+#define	S_INJECTDATAERR    0
+#define	V_INJECTDATAERR(x) ((x) << S_INJECTDATAERR)
+#define	F_INJECTDATAERR    V_INJECTDATAERR(1U)
+
+#define	A_SGE_INT_CAUSE1 0x1024
+
+#define	S_PERR_FLM_CREDITFIFO    30
+#define	V_PERR_FLM_CREDITFIFO(x) ((x) << S_PERR_FLM_CREDITFIFO)
+#define	F_PERR_FLM_CREDITFIFO    V_PERR_FLM_CREDITFIFO(1U)
+
+#define	S_PERR_IMSG_HINT_FIFO    29
+#define	V_PERR_IMSG_HINT_FIFO(x) ((x) << S_PERR_IMSG_HINT_FIFO)
+#define	F_PERR_IMSG_HINT_FIFO    V_PERR_IMSG_HINT_FIFO(1U)
+
+#define	S_PERR_MC_PC    28
+#define	V_PERR_MC_PC(x) ((x) << S_PERR_MC_PC)
+#define	F_PERR_MC_PC    V_PERR_MC_PC(1U)
+
+#define	S_PERR_MC_IGR_CTXT    27
+#define	V_PERR_MC_IGR_CTXT(x) ((x) << S_PERR_MC_IGR_CTXT)
+#define	F_PERR_MC_IGR_CTXT    V_PERR_MC_IGR_CTXT(1U)
+
+#define	S_PERR_MC_EGR_CTXT    26
+#define	V_PERR_MC_EGR_CTXT(x) ((x) << S_PERR_MC_EGR_CTXT)
+#define	F_PERR_MC_EGR_CTXT    V_PERR_MC_EGR_CTXT(1U)
+
+#define	S_PERR_MC_FLM    25
+#define	V_PERR_MC_FLM(x) ((x) << S_PERR_MC_FLM)
+#define	F_PERR_MC_FLM    V_PERR_MC_FLM(1U)
+
+#define	S_PERR_PC_MCTAG    24
+#define	V_PERR_PC_MCTAG(x) ((x) << S_PERR_PC_MCTAG)
+#define	F_PERR_PC_MCTAG    V_PERR_PC_MCTAG(1U)
+
+#define	S_PERR_PC_CHPI_RSP1    23
+#define	V_PERR_PC_CHPI_RSP1(x) ((x) << S_PERR_PC_CHPI_RSP1)
+#define	F_PERR_PC_CHPI_RSP1    V_PERR_PC_CHPI_RSP1(1U)
+
+#define	S_PERR_PC_CHPI_RSP0    22
+#define	V_PERR_PC_CHPI_RSP0(x) ((x) << S_PERR_PC_CHPI_RSP0)
+#define	F_PERR_PC_CHPI_RSP0    V_PERR_PC_CHPI_RSP0(1U)
+
+#define	S_PERR_DBP_PC_RSP_FIFO3    21
+#define	V_PERR_DBP_PC_RSP_FIFO3(x) ((x) << S_PERR_DBP_PC_RSP_FIFO3)
+#define	F_PERR_DBP_PC_RSP_FIFO3    V_PERR_DBP_PC_RSP_FIFO3(1U)
+
+#define	S_PERR_DBP_PC_RSP_FIFO2    20
+#define	V_PERR_DBP_PC_RSP_FIFO2(x) ((x) << S_PERR_DBP_PC_RSP_FIFO2)
+#define	F_PERR_DBP_PC_RSP_FIFO2    V_PERR_DBP_PC_RSP_FIFO2(1U)
+
+#define	S_PERR_DBP_PC_RSP_FIFO1    19
+#define	V_PERR_DBP_PC_RSP_FIFO1(x) ((x) << S_PERR_DBP_PC_RSP_FIFO1)
+#define	F_PERR_DBP_PC_RSP_FIFO1    V_PERR_DBP_PC_RSP_FIFO1(1U)
+
+#define	S_PERR_DBP_PC_RSP_FIFO0    18
+#define	V_PERR_DBP_PC_RSP_FIFO0(x) ((x) << S_PERR_DBP_PC_RSP_FIFO0)
+#define	F_PERR_DBP_PC_RSP_FIFO0    V_PERR_DBP_PC_RSP_FIFO0(1U)
+
+#define	S_PERR_DMARBT    17
+#define	V_PERR_DMARBT(x) ((x) << S_PERR_DMARBT)
+#define	F_PERR_DMARBT    V_PERR_DMARBT(1U)
+
+#define	S_PERR_FLM_DBPFIFO    16
+#define	V_PERR_FLM_DBPFIFO(x) ((x) << S_PERR_FLM_DBPFIFO)
+#define	F_PERR_FLM_DBPFIFO    V_PERR_FLM_DBPFIFO(1U)
+
+#define	S_PERR_FLM_MCREQ_FIFO    15
+#define	V_PERR_FLM_MCREQ_FIFO(x) ((x) << S_PERR_FLM_MCREQ_FIFO)
+#define	F_PERR_FLM_MCREQ_FIFO    V_PERR_FLM_MCREQ_FIFO(1U)
+
+#define	S_PERR_FLM_HINTFIFO    14
+#define	V_PERR_FLM_HINTFIFO(x) ((x) << S_PERR_FLM_HINTFIFO)
+#define	F_PERR_FLM_HINTFIFO    V_PERR_FLM_HINTFIFO(1U)
+
+#define	S_PERR_ALIGN_CTL_FIFO3    13
+#define	V_PERR_ALIGN_CTL_FIFO3(x) ((x) << S_PERR_ALIGN_CTL_FIFO3)
+#define	F_PERR_ALIGN_CTL_FIFO3    V_PERR_ALIGN_CTL_FIFO3(1U)
+
+#define	S_PERR_ALIGN_CTL_FIFO2    12
+#define	V_PERR_ALIGN_CTL_FIFO2(x) ((x) << S_PERR_ALIGN_CTL_FIFO2)
+#define	F_PERR_ALIGN_CTL_FIFO2    V_PERR_ALIGN_CTL_FIFO2(1U)
+
+#define	S_PERR_ALIGN_CTL_FIFO1    11
+#define	V_PERR_ALIGN_CTL_FIFO1(x) ((x) << S_PERR_ALIGN_CTL_FIFO1)
+#define	F_PERR_ALIGN_CTL_FIFO1    V_PERR_ALIGN_CTL_FIFO1(1U)
+
+#define	S_PERR_ALIGN_CTL_FIFO0    10
+#define	V_PERR_ALIGN_CTL_FIFO0(x) ((x) << S_PERR_ALIGN_CTL_FIFO0)
+#define	F_PERR_ALIGN_CTL_FIFO0    V_PERR_ALIGN_CTL_FIFO0(1U)
+
+#define	S_PERR_EDMA_FIFO3    9
+#define	V_PERR_EDMA_FIFO3(x) ((x) << S_PERR_EDMA_FIFO3)
+#define	F_PERR_EDMA_FIFO3    V_PERR_EDMA_FIFO3(1U)
+
+#define	S_PERR_EDMA_FIFO2    8
+#define	V_PERR_EDMA_FIFO2(x) ((x) << S_PERR_EDMA_FIFO2)
+#define	F_PERR_EDMA_FIFO2    V_PERR_EDMA_FIFO2(1U)
+
+#define	S_PERR_EDMA_FIFO1    7
+#define	V_PERR_EDMA_FIFO1(x) ((x) << S_PERR_EDMA_FIFO1)
+#define	F_PERR_EDMA_FIFO1    V_PERR_EDMA_FIFO1(1U)
+
+#define	S_PERR_EDMA_FIFO0    6
+#define	V_PERR_EDMA_FIFO0(x) ((x) << S_PERR_EDMA_FIFO0)
+#define	F_PERR_EDMA_FIFO0    V_PERR_EDMA_FIFO0(1U)
+
+#define	S_PERR_PD_FIFO3    5
+#define	V_PERR_PD_FIFO3(x) ((x) << S_PERR_PD_FIFO3)
+#define	F_PERR_PD_FIFO3    V_PERR_PD_FIFO3(1U)
+
+#define	S_PERR_PD_FIFO2    4
+#define	V_PERR_PD_FIFO2(x) ((x) << S_PERR_PD_FIFO2)
+#define	F_PERR_PD_FIFO2    V_PERR_PD_FIFO2(1U)
+
+#define	S_PERR_PD_FIFO1    3
+#define	V_PERR_PD_FIFO1(x) ((x) << S_PERR_PD_FIFO1)
+#define	F_PERR_PD_FIFO1    V_PERR_PD_FIFO1(1U)
+
+#define	S_PERR_PD_FIFO0    2
+#define	V_PERR_PD_FIFO0(x) ((x) << S_PERR_PD_FIFO0)
+#define	F_PERR_PD_FIFO0    V_PERR_PD_FIFO0(1U)
+
+#define	S_PERR_ING_CTXT_MIFRSP    1
+#define	V_PERR_ING_CTXT_MIFRSP(x) ((x) << S_PERR_ING_CTXT_MIFRSP)
+#define	F_PERR_ING_CTXT_MIFRSP    V_PERR_ING_CTXT_MIFRSP(1U)
+
+#define	S_PERR_EGR_CTXT_MIFRSP    0
+#define	V_PERR_EGR_CTXT_MIFRSP(x) ((x) << S_PERR_EGR_CTXT_MIFRSP)
+#define	F_PERR_EGR_CTXT_MIFRSP    V_PERR_EGR_CTXT_MIFRSP(1U)
+
+#define	A_SGE_INT_ENABLE1 0x1028
+#define	A_SGE_PERR_ENABLE1 0x102c
+#define	A_SGE_INT_CAUSE2 0x1030
+
+#define	S_PERR_HINT_DELAY_FIFO1    30
+#define	V_PERR_HINT_DELAY_FIFO1(x) ((x) << S_PERR_HINT_DELAY_FIFO1)
+#define	F_PERR_HINT_DELAY_FIFO1    V_PERR_HINT_DELAY_FIFO1(1U)
+
+#define	S_PERR_HINT_DELAY_FIFO0    29
+#define	V_PERR_HINT_DELAY_FIFO0(x) ((x) << S_PERR_HINT_DELAY_FIFO0)
+#define	F_PERR_HINT_DELAY_FIFO0    V_PERR_HINT_DELAY_FIFO0(1U)
+
+#define	S_PERR_IMSG_PD_FIFO    28
+#define	V_PERR_IMSG_PD_FIFO(x) ((x) << S_PERR_IMSG_PD_FIFO)
+#define	F_PERR_IMSG_PD_FIFO    V_PERR_IMSG_PD_FIFO(1U)
+
+#define	S_PERR_ULPTX_FIFO1    27
+#define	V_PERR_ULPTX_FIFO1(x) ((x) << S_PERR_ULPTX_FIFO1)
+#define	F_PERR_ULPTX_FIFO1    V_PERR_ULPTX_FIFO1(1U)
+
+#define	S_PERR_ULPTX_FIFO0    26
+#define	V_PERR_ULPTX_FIFO0(x) ((x) << S_PERR_ULPTX_FIFO0)
+#define	F_PERR_ULPTX_FIFO0    V_PERR_ULPTX_FIFO0(1U)
+
+#define	S_PERR_IDMA2IMSG_FIFO1    25
+#define	V_PERR_IDMA2IMSG_FIFO1(x) ((x) << S_PERR_IDMA2IMSG_FIFO1)
+#define	F_PERR_IDMA2IMSG_FIFO1    V_PERR_IDMA2IMSG_FIFO1(1U)
+
+#define	S_PERR_IDMA2IMSG_FIFO0    24
+#define	V_PERR_IDMA2IMSG_FIFO0(x) ((x) << S_PERR_IDMA2IMSG_FIFO0)
+#define	F_PERR_IDMA2IMSG_FIFO0    V_PERR_IDMA2IMSG_FIFO0(1U)
+
+#define	S_PERR_HEADERSPLIT_FIFO1    23
+#define	V_PERR_HEADERSPLIT_FIFO1(x) ((x) << S_PERR_HEADERSPLIT_FIFO1)
+#define	F_PERR_HEADERSPLIT_FIFO1    V_PERR_HEADERSPLIT_FIFO1(1U)
+
+#define	S_PERR_HEADERSPLIT_FIFO0    22
+#define	V_PERR_HEADERSPLIT_FIFO0(x) ((x) << S_PERR_HEADERSPLIT_FIFO0)
+#define	F_PERR_HEADERSPLIT_FIFO0    V_PERR_HEADERSPLIT_FIFO0(1U)
+
+#define	S_PERR_ESWITCH_FIFO3    21
+#define	V_PERR_ESWITCH_FIFO3(x) ((x) << S_PERR_ESWITCH_FIFO3)
+#define	F_PERR_ESWITCH_FIFO3    V_PERR_ESWITCH_FIFO3(1U)
+
+#define	S_PERR_ESWITCH_FIFO2    20
+#define	V_PERR_ESWITCH_FIFO2(x) ((x) << S_PERR_ESWITCH_FIFO2)
+#define	F_PERR_ESWITCH_FIFO2    V_PERR_ESWITCH_FIFO2(1U)
+
+#define	S_PERR_ESWITCH_FIFO1    19
+#define	V_PERR_ESWITCH_FIFO1(x) ((x) << S_PERR_ESWITCH_FIFO1)
+#define	F_PERR_ESWITCH_FIFO1    V_PERR_ESWITCH_FIFO1(1U)
+
+#define	S_PERR_ESWITCH_FIFO0    18
+#define	V_PERR_ESWITCH_FIFO0(x) ((x) << S_PERR_ESWITCH_FIFO0)
+#define	F_PERR_ESWITCH_FIFO0    V_PERR_ESWITCH_FIFO0(1U)
+
+#define	S_PERR_PC_DBP1    17
+#define	V_PERR_PC_DBP1(x) ((x) << S_PERR_PC_DBP1)
+#define	F_PERR_PC_DBP1    V_PERR_PC_DBP1(1U)
+
+#define	S_PERR_PC_DBP0    16
+#define	V_PERR_PC_DBP0(x) ((x) << S_PERR_PC_DBP0)
+#define	F_PERR_PC_DBP0    V_PERR_PC_DBP0(1U)
+
+#define	S_PERR_IMSG_OB_FIFO    15
+#define	V_PERR_IMSG_OB_FIFO(x) ((x) << S_PERR_IMSG_OB_FIFO)
+#define	F_PERR_IMSG_OB_FIFO    V_PERR_IMSG_OB_FIFO(1U)
+
+#define	S_PERR_CONM_SRAM    14
+#define	V_PERR_CONM_SRAM(x) ((x) << S_PERR_CONM_SRAM)
+#define	F_PERR_CONM_SRAM    V_PERR_CONM_SRAM(1U)
+
+#define	S_PERR_PC_MC_RSP    13
+#define	V_PERR_PC_MC_RSP(x) ((x) << S_PERR_PC_MC_RSP)
+#define	F_PERR_PC_MC_RSP    V_PERR_PC_MC_RSP(1U)
+
+#define	S_PERR_ISW_IDMA0_FIFO    12
+#define	V_PERR_ISW_IDMA0_FIFO(x) ((x) << S_PERR_ISW_IDMA0_FIFO)
+#define	F_PERR_ISW_IDMA0_FIFO    V_PERR_ISW_IDMA0_FIFO(1U)
+
+#define	S_PERR_ISW_IDMA1_FIFO    11
+#define	V_PERR_ISW_IDMA1_FIFO(x) ((x) << S_PERR_ISW_IDMA1_FIFO)
+#define	F_PERR_ISW_IDMA1_FIFO    V_PERR_ISW_IDMA1_FIFO(1U)
+
+#define	S_PERR_ISW_DBP_FIFO    10
+#define	V_PERR_ISW_DBP_FIFO(x) ((x) << S_PERR_ISW_DBP_FIFO)
+#define	F_PERR_ISW_DBP_FIFO    V_PERR_ISW_DBP_FIFO(1U)
+
+#define	S_PERR_ISW_GTS_FIFO    9
+#define	V_PERR_ISW_GTS_FIFO(x) ((x) << S_PERR_ISW_GTS_FIFO)
+#define	F_PERR_ISW_GTS_FIFO    V_PERR_ISW_GTS_FIFO(1U)
+
+#define	S_PERR_ITP_EVR    8
+#define	V_PERR_ITP_EVR(x) ((x) << S_PERR_ITP_EVR)
+#define	F_PERR_ITP_EVR    V_PERR_ITP_EVR(1U)
+
+#define	S_PERR_FLM_CNTXMEM    7
+#define	V_PERR_FLM_CNTXMEM(x) ((x) << S_PERR_FLM_CNTXMEM)
+#define	F_PERR_FLM_CNTXMEM    V_PERR_FLM_CNTXMEM(1U)
+
+#define	S_PERR_FLM_L1CACHE    6
+#define	V_PERR_FLM_L1CACHE(x) ((x) << S_PERR_FLM_L1CACHE)
+#define	F_PERR_FLM_L1CACHE    V_PERR_FLM_L1CACHE(1U)
+
+#define	S_PERR_DBP_HINT_FIFO    5
+#define	V_PERR_DBP_HINT_FIFO(x) ((x) << S_PERR_DBP_HINT_FIFO)
+#define	F_PERR_DBP_HINT_FIFO    V_PERR_DBP_HINT_FIFO(1U)
+
+#define	S_PERR_DBP_HP_FIFO    4
+#define	V_PERR_DBP_HP_FIFO(x) ((x) << S_PERR_DBP_HP_FIFO)
+#define	F_PERR_DBP_HP_FIFO    V_PERR_DBP_HP_FIFO(1U)
+
+#define	S_PERR_DBP_LP_FIFO    3
+#define	V_PERR_DBP_LP_FIFO(x) ((x) << S_PERR_DBP_LP_FIFO)
+#define	F_PERR_DBP_LP_FIFO    V_PERR_DBP_LP_FIFO(1U)
+
+#define	S_PERR_ING_CTXT_CACHE    2
+#define	V_PERR_ING_CTXT_CACHE(x) ((x) << S_PERR_ING_CTXT_CACHE)
+#define	F_PERR_ING_CTXT_CACHE    V_PERR_ING_CTXT_CACHE(1U)
+
+#define	S_PERR_EGR_CTXT_CACHE    1
+#define	V_PERR_EGR_CTXT_CACHE(x) ((x) << S_PERR_EGR_CTXT_CACHE)
+#define	F_PERR_EGR_CTXT_CACHE    V_PERR_EGR_CTXT_CACHE(1U)
+
+#define	S_PERR_BASE_SIZE    0
+#define	V_PERR_BASE_SIZE(x) ((x) << S_PERR_BASE_SIZE)
+#define	F_PERR_BASE_SIZE    V_PERR_BASE_SIZE(1U)
+
+#define	A_SGE_INT_ENABLE2 0x1034
+#define	A_SGE_PERR_ENABLE2 0x1038
+#define	A_SGE_INT_CAUSE3 0x103c
+
+#define	S_ERR_FLM_DBP    31
+#define	V_ERR_FLM_DBP(x) ((x) << S_ERR_FLM_DBP)
+#define	F_ERR_FLM_DBP    V_ERR_FLM_DBP(1U)
+
+#define	S_ERR_FLM_IDMA1    30
+#define	V_ERR_FLM_IDMA1(x) ((x) << S_ERR_FLM_IDMA1)
+#define	F_ERR_FLM_IDMA1    V_ERR_FLM_IDMA1(1U)
+
+#define	S_ERR_FLM_IDMA0    29
+#define	V_ERR_FLM_IDMA0(x) ((x) << S_ERR_FLM_IDMA0)
+#define	F_ERR_FLM_IDMA0    V_ERR_FLM_IDMA0(1U)
+
+#define	S_ERR_FLM_HINT    28
+#define	V_ERR_FLM_HINT(x) ((x) << S_ERR_FLM_HINT)
+#define	F_ERR_FLM_HINT    V_ERR_FLM_HINT(1U)
+
+#define	S_ERR_PCIE_ERROR3    27
+#define	V_ERR_PCIE_ERROR3(x) ((x) << S_ERR_PCIE_ERROR3)
+#define	F_ERR_PCIE_ERROR3    V_ERR_PCIE_ERROR3(1U)
+
+#define	S_ERR_PCIE_ERROR2    26
+#define	V_ERR_PCIE_ERROR2(x) ((x) << S_ERR_PCIE_ERROR2)
+#define	F_ERR_PCIE_ERROR2    V_ERR_PCIE_ERROR2(1U)
+
+#define	S_ERR_PCIE_ERROR1    25
+#define	V_ERR_PCIE_ERROR1(x) ((x) << S_ERR_PCIE_ERROR1)
+#define	F_ERR_PCIE_ERROR1    V_ERR_PCIE_ERROR1(1U)
+
+#define	S_ERR_PCIE_ERROR0    24
+#define	V_ERR_PCIE_ERROR0(x) ((x) << S_ERR_PCIE_ERROR0)
+#define	F_ERR_PCIE_ERROR0    V_ERR_PCIE_ERROR0(1U)
+
+#define	S_ERR_TIMER_ABOVE_MAX_QID    23
+#define	V_ERR_TIMER_ABOVE_MAX_QID(x) ((x) << S_ERR_TIMER_ABOVE_MAX_QID)
+#define	F_ERR_TIMER_ABOVE_MAX_QID    V_ERR_TIMER_ABOVE_MAX_QID(1U)
+
+#define	S_ERR_CPL_EXCEED_IQE_SIZE    22
+#define	V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
+#define	F_ERR_CPL_EXCEED_IQE_SIZE    V_ERR_CPL_EXCEED_IQE_SIZE(1U)
+
+#define	S_ERR_INVALID_CIDX_INC    21
+#define	V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC)
+#define	F_ERR_INVALID_CIDX_INC    V_ERR_INVALID_CIDX_INC(1U)
+
+#define	S_ERR_ITP_TIME_PAUSED    20
+#define	V_ERR_ITP_TIME_PAUSED(x) ((x) << S_ERR_ITP_TIME_PAUSED)
+#define	F_ERR_ITP_TIME_PAUSED    V_ERR_ITP_TIME_PAUSED(1U)
+
+#define	S_ERR_CPL_OPCODE_0    19
+#define	V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0)
+#define	F_ERR_CPL_OPCODE_0    V_ERR_CPL_OPCODE_0(1U)
+
+#define	S_ERR_DROPPED_DB    18
+#define	V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
+#define	F_ERR_DROPPED_DB    V_ERR_DROPPED_DB(1U)
+
+#define	S_ERR_DATA_CPL_ON_HIGH_QID1    17
+#define	V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1)
+#define	F_ERR_DATA_CPL_ON_HIGH_QID1    V_ERR_DATA_CPL_ON_HIGH_QID1(1U)
+
+#define	S_ERR_DATA_CPL_ON_HIGH_QID0    16
+#define	V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0)
+#define	F_ERR_DATA_CPL_ON_HIGH_QID0    V_ERR_DATA_CPL_ON_HIGH_QID0(1U)
+
+#define	S_ERR_BAD_DB_PIDX3    15
+#define	V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3)
+#define	F_ERR_BAD_DB_PIDX3    V_ERR_BAD_DB_PIDX3(1U)
+
+#define	S_ERR_BAD_DB_PIDX2    14
+#define	V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2)
+#define	F_ERR_BAD_DB_PIDX2    V_ERR_BAD_DB_PIDX2(1U)
+
+#define	S_ERR_BAD_DB_PIDX1    13
+#define	V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1)
+#define	F_ERR_BAD_DB_PIDX1    V_ERR_BAD_DB_PIDX1(1U)
+
+#define	S_ERR_BAD_DB_PIDX0    12
+#define	V_ERR_BAD_DB_PIDX0(x) ((x) << S_ERR_BAD_DB_PIDX0)
+#define	F_ERR_BAD_DB_PIDX0    V_ERR_BAD_DB_PIDX0(1U)
+
+#define	S_ERR_ING_PCIE_CHAN    11
+#define	V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN)
+#define	F_ERR_ING_PCIE_CHAN    V_ERR_ING_PCIE_CHAN(1U)
+
+#define	S_ERR_ING_CTXT_PRIO    10
+#define	V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO)
+#define	F_ERR_ING_CTXT_PRIO    V_ERR_ING_CTXT_PRIO(1U)
+
+#define	S_ERR_EGR_CTXT_PRIO    9
+#define	V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO)
+#define	F_ERR_EGR_CTXT_PRIO    V_ERR_EGR_CTXT_PRIO(1U)
+
+#define	S_DBFIFO_HP_INT    8
+#define	V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
+#define	F_DBFIFO_HP_INT    V_DBFIFO_HP_INT(1U)
+
+#define	S_DBFIFO_LP_INT    7
+#define	V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
+#define	F_DBFIFO_LP_INT    V_DBFIFO_LP_INT(1U)
+
+#define	S_REG_ADDRESS_ERR    6
+#define	V_REG_ADDRESS_ERR(x) ((x) << S_REG_ADDRESS_ERR)
+#define	F_REG_ADDRESS_ERR    V_REG_ADDRESS_ERR(1U)
+
+#define	S_INGRESS_SIZE_ERR    5
+#define	V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR)
+#define	F_INGRESS_SIZE_ERR    V_INGRESS_SIZE_ERR(1U)
+
+#define	S_EGRESS_SIZE_ERR    4
+#define	V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR)
+#define	F_EGRESS_SIZE_ERR    V_EGRESS_SIZE_ERR(1U)
+
+#define	S_ERR_INV_CTXT3    3
+#define	V_ERR_INV_CTXT3(x) ((x) << S_ERR_INV_CTXT3)
+#define	F_ERR_INV_CTXT3    V_ERR_INV_CTXT3(1U)
+
+#define	S_ERR_INV_CTXT2    2
+#define	V_ERR_INV_CTXT2(x) ((x) << S_ERR_INV_CTXT2)
+#define	F_ERR_INV_CTXT2    V_ERR_INV_CTXT2(1U)
+
+#define	S_ERR_INV_CTXT1    1
+#define	V_ERR_INV_CTXT1(x) ((x) << S_ERR_INV_CTXT1)
+#define	F_ERR_INV_CTXT1    V_ERR_INV_CTXT1(1U)
+
+#define	S_ERR_INV_CTXT0    0
+#define	V_ERR_INV_CTXT0(x) ((x) << S_ERR_INV_CTXT0)
+#define	F_ERR_INV_CTXT0    V_ERR_INV_CTXT0(1U)
+
+#define	A_SGE_INT_ENABLE3 0x1040
+#define	A_SGE_FL_BUFFER_SIZE0 0x1044
+
+#define	S_SIZE    4
+#define	M_SIZE    0xfffffffU
+#define	V_SIZE(x) ((x) << S_SIZE)
+#define	G_SIZE(x) (((x) >> S_SIZE) & M_SIZE)
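+
+/*
+ * Since S_SIZE is 4, the low four bits of a free-list buffer size
+ * register are reserved; the register is commonly written with the byte
+ * size directly, which must therefore be a multiple of 16.  A
+ * hypothetical sketch:
+ *
+ *	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
+ */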
+
+#define	A_SGE_FL_BUFFER_SIZE1 0x1048
+#define	A_SGE_FL_BUFFER_SIZE2 0x104c
+#define	A_SGE_FL_BUFFER_SIZE3 0x1050
+#define	A_SGE_FL_BUFFER_SIZE4 0x1054
+#define	A_SGE_FL_BUFFER_SIZE5 0x1058
+#define	A_SGE_FL_BUFFER_SIZE6 0x105c
+#define	A_SGE_FL_BUFFER_SIZE7 0x1060
+#define	A_SGE_FL_BUFFER_SIZE8 0x1064
+#define	A_SGE_FL_BUFFER_SIZE9 0x1068
+#define	A_SGE_FL_BUFFER_SIZE10 0x106c
+#define	A_SGE_FL_BUFFER_SIZE11 0x1070
+#define	A_SGE_FL_BUFFER_SIZE12 0x1074
+#define	A_SGE_FL_BUFFER_SIZE13 0x1078
+#define	A_SGE_FL_BUFFER_SIZE14 0x107c
+#define	A_SGE_FL_BUFFER_SIZE15 0x1080
+#define	A_SGE_DBQ_CTXT_BADDR 0x1084
+
+#define	S_BASEADDR    3
+#define	M_BASEADDR    0x1fffffffU
+#define	V_BASEADDR(x) ((x) << S_BASEADDR)
+#define	G_BASEADDR(x) (((x) >> S_BASEADDR) & M_BASEADDR)
+
+#define	A_SGE_IMSG_CTXT_BADDR 0x1088
+#define	A_SGE_FLM_CACHE_BADDR 0x108c
+#define	A_SGE_FLM_CFG 0x1090
+
+#define	S_OPMODE    26
+#define	M_OPMODE    0x3fU
+#define	V_OPMODE(x) ((x) << S_OPMODE)
+#define	G_OPMODE(x) (((x) >> S_OPMODE) & M_OPMODE)
+
+#define	S_NOHDR    18
+#define	V_NOHDR(x) ((x) << S_NOHDR)
+#define	F_NOHDR    V_NOHDR(1U)
+
+#define	S_CACHEPTRCNT    16
+#define	M_CACHEPTRCNT    0x3U
+#define	V_CACHEPTRCNT(x) ((x) << S_CACHEPTRCNT)
+#define	G_CACHEPTRCNT(x) (((x) >> S_CACHEPTRCNT) & M_CACHEPTRCNT)
+
+#define	S_EDRAMPTRCNT    14
+#define	M_EDRAMPTRCNT    0x3U
+#define	V_EDRAMPTRCNT(x) ((x) << S_EDRAMPTRCNT)
+#define	G_EDRAMPTRCNT(x) (((x) >> S_EDRAMPTRCNT) & M_EDRAMPTRCNT)
+
+#define	S_HDRSTARTFLQ    11
+#define	M_HDRSTARTFLQ    0x7U
+#define	V_HDRSTARTFLQ(x) ((x) << S_HDRSTARTFLQ)
+#define	G_HDRSTARTFLQ(x) (((x) >> S_HDRSTARTFLQ) & M_HDRSTARTFLQ)
+
+#define	S_FETCHTHRESH    6
+#define	M_FETCHTHRESH    0x1fU
+#define	V_FETCHTHRESH(x) ((x) << S_FETCHTHRESH)
+#define	G_FETCHTHRESH(x) (((x) >> S_FETCHTHRESH) & M_FETCHTHRESH)
+
+#define	S_CREDITCNT    4
+#define	M_CREDITCNT    0x3U
+#define	V_CREDITCNT(x) ((x) << S_CREDITCNT)
+#define	G_CREDITCNT(x) (((x) >> S_CREDITCNT) & M_CREDITCNT)
+
+#define	S_NOEDRAM    0
+#define	V_NOEDRAM(x) ((x) << S_NOEDRAM)
+#define	F_NOEDRAM    V_NOEDRAM(1U)
+
+#define	A_SGE_CONM_CTRL 0x1094
+
+#define	S_EGRTHRESHOLD    8
+#define	M_EGRTHRESHOLD    0x3fU
+#define	V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
+#define	G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD)
+
+#define	S_INGTHRESHOLD    2
+#define	M_INGTHRESHOLD    0x3fU
+#define	V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD)
+#define	G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD)
+
+#define	S_MPS_ENABLE    1
+#define	V_MPS_ENABLE(x) ((x) << S_MPS_ENABLE)
+#define	F_MPS_ENABLE    V_MPS_ENABLE(1U)
+
+#define	S_TP_ENABLE    0
+#define	V_TP_ENABLE(x) ((x) << S_TP_ENABLE)
+#define	F_TP_ENABLE    V_TP_ENABLE(1U)
+
+#define	A_SGE_TIMESTAMP_LO 0x1098
+#define	A_SGE_TIMESTAMP_HI 0x109c
+
+#define	S_TSOP    28
+#define	M_TSOP    0x3U
+#define	V_TSOP(x) ((x) << S_TSOP)
+#define	G_TSOP(x) (((x) >> S_TSOP) & M_TSOP)
+
+#define	S_TSVAL    0
+#define	M_TSVAL    0xfffffffU
+#define	V_TSVAL(x) ((x) << S_TSVAL)
+#define	G_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
+
+#define	A_SGE_INGRESS_RX_THRESHOLD 0x10a0
+
+#define	S_THRESHOLD_0    24
+#define	M_THRESHOLD_0    0x3fU
+#define	V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0)
+#define	G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0)
+
+#define	S_THRESHOLD_1    16
+#define	M_THRESHOLD_1    0x3fU
+#define	V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1)
+#define	G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1)
+
+#define	S_THRESHOLD_2    8
+#define	M_THRESHOLD_2    0x3fU
+#define	V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2)
+#define	G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2)
+
+#define	S_THRESHOLD_3    0
+#define	M_THRESHOLD_3    0x3fU
+#define	V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3)
+#define	G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3)
+
+#define	A_SGE_DBFIFO_STATUS 0x10a4
+
+#define	S_HP_INT_THRESH    28
+#define	M_HP_INT_THRESH    0xfU
+#define	V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define	G_HP_INT_THRESH(x) (((x) >> S_HP_INT_THRESH) & M_HP_INT_THRESH)
+
+#define	S_HP_COUNT    16
+#define	M_HP_COUNT    0x7ffU
+#define	V_HP_COUNT(x) ((x) << S_HP_COUNT)
+#define	G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
+
+#define	S_LP_INT_THRESH    12
+#define	M_LP_INT_THRESH    0xfU
+#define	V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
+#define	G_LP_INT_THRESH(x) (((x) >> S_LP_INT_THRESH) & M_LP_INT_THRESH)
+
+#define	S_LP_COUNT    0
+#define	M_LP_COUNT    0x7ffU
+#define	V_LP_COUNT(x) ((x) << S_LP_COUNT)
+#define	G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
+
+#define	A_SGE_DOORBELL_CONTROL 0x10a8
+
+#define	S_HINTDEPTHCTL    27
+#define	M_HINTDEPTHCTL    0x1fU
+#define	V_HINTDEPTHCTL(x) ((x) << S_HINTDEPTHCTL)
+#define	G_HINTDEPTHCTL(x) (((x) >> S_HINTDEPTHCTL) & M_HINTDEPTHCTL)
+
+#define	S_NOCOALESCE    26
+#define	V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
+#define	F_NOCOALESCE    V_NOCOALESCE(1U)
+
+#define	S_HP_WEIGHT    24
+#define	M_HP_WEIGHT    0x3U
+#define	V_HP_WEIGHT(x) ((x) << S_HP_WEIGHT)
+#define	G_HP_WEIGHT(x) (((x) >> S_HP_WEIGHT) & M_HP_WEIGHT)
+
+#define	S_HP_DISABLE    23
+#define	V_HP_DISABLE(x) ((x) << S_HP_DISABLE)
+#define	F_HP_DISABLE    V_HP_DISABLE(1U)
+
+#define	S_FORCEUSERDBTOLP    22
+#define	V_FORCEUSERDBTOLP(x) ((x) << S_FORCEUSERDBTOLP)
+#define	F_FORCEUSERDBTOLP    V_FORCEUSERDBTOLP(1U)
+
+#define	S_FORCEVFPF0DBTOLP    21
+#define	V_FORCEVFPF0DBTOLP(x) ((x) << S_FORCEVFPF0DBTOLP)
+#define	F_FORCEVFPF0DBTOLP    V_FORCEVFPF0DBTOLP(1U)
+
+#define	S_FORCEVFPF1DBTOLP    20
+#define	V_FORCEVFPF1DBTOLP(x) ((x) << S_FORCEVFPF1DBTOLP)
+#define	F_FORCEVFPF1DBTOLP    V_FORCEVFPF1DBTOLP(1U)
+
+#define	S_FORCEVFPF2DBTOLP    19
+#define	V_FORCEVFPF2DBTOLP(x) ((x) << S_FORCEVFPF2DBTOLP)
+#define	F_FORCEVFPF2DBTOLP    V_FORCEVFPF2DBTOLP(1U)
+
+#define	S_FORCEVFPF3DBTOLP    18
+#define	V_FORCEVFPF3DBTOLP(x) ((x) << S_FORCEVFPF3DBTOLP)
+#define	F_FORCEVFPF3DBTOLP    V_FORCEVFPF3DBTOLP(1U)
+
+#define	S_FORCEVFPF4DBTOLP    17
+#define	V_FORCEVFPF4DBTOLP(x) ((x) << S_FORCEVFPF4DBTOLP)
+#define	F_FORCEVFPF4DBTOLP    V_FORCEVFPF4DBTOLP(1U)
+
+#define	S_FORCEVFPF5DBTOLP    16
+#define	V_FORCEVFPF5DBTOLP(x) ((x) << S_FORCEVFPF5DBTOLP)
+#define	F_FORCEVFPF5DBTOLP    V_FORCEVFPF5DBTOLP(1U)
+
+#define	S_FORCEVFPF6DBTOLP    15
+#define	V_FORCEVFPF6DBTOLP(x) ((x) << S_FORCEVFPF6DBTOLP)
+#define	F_FORCEVFPF6DBTOLP    V_FORCEVFPF6DBTOLP(1U)
+
+#define	S_FORCEVFPF7DBTOLP    14
+#define	V_FORCEVFPF7DBTOLP(x) ((x) << S_FORCEVFPF7DBTOLP)
+#define	F_FORCEVFPF7DBTOLP    V_FORCEVFPF7DBTOLP(1U)
+
+#define	S_ENABLE_DROP    13
+#define	V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
+#define	F_ENABLE_DROP    V_ENABLE_DROP(1U)
+
+#define	S_DROP_TIMEOUT    1
+#define	M_DROP_TIMEOUT    0xfffU
+#define	V_DROP_TIMEOUT(x) ((x) << S_DROP_TIMEOUT)
+#define	G_DROP_TIMEOUT(x) (((x) >> S_DROP_TIMEOUT) & M_DROP_TIMEOUT)
+
+#define	S_DROPPED_DB    0
+#define	V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
+#define	F_DROPPED_DB    V_DROPPED_DB(1U)
+
+#define	A_SGE_DROPPED_DOORBELL 0x10ac
+#define	A_SGE_DOORBELL_THROTTLE_CONTROL 0x10b0
+
+#define	S_THROTTLE_COUNT    1
+#define	M_THROTTLE_COUNT    0xfffU
+#define	V_THROTTLE_COUNT(x) ((x) << S_THROTTLE_COUNT)
+#define	G_THROTTLE_COUNT(x) (((x) >> S_THROTTLE_COUNT) & M_THROTTLE_COUNT)
+
+#define	S_THROTTLE_ENABLE    0
+#define	V_THROTTLE_ENABLE(x) ((x) << S_THROTTLE_ENABLE)
+#define	F_THROTTLE_ENABLE    V_THROTTLE_ENABLE(1U)
+
+#define	A_SGE_ITP_CONTROL 0x10b4
+
+#define	S_CRITICAL_TIME    10
+#define	M_CRITICAL_TIME    0x7fffU
+#define	V_CRITICAL_TIME(x) ((x) << S_CRITICAL_TIME)
+#define	G_CRITICAL_TIME(x) (((x) >> S_CRITICAL_TIME) & M_CRITICAL_TIME)
+
+#define	S_LL_EMPTY    4
+#define	M_LL_EMPTY    0x3fU
+#define	V_LL_EMPTY(x) ((x) << S_LL_EMPTY)
+#define	G_LL_EMPTY(x) (((x) >> S_LL_EMPTY) & M_LL_EMPTY)
+
+#define	S_LL_READ_WAIT_DISABLE    0
+#define	V_LL_READ_WAIT_DISABLE(x) ((x) << S_LL_READ_WAIT_DISABLE)
+#define	F_LL_READ_WAIT_DISABLE    V_LL_READ_WAIT_DISABLE(1U)
+
+#define	A_SGE_TIMER_VALUE_0_AND_1 0x10b8
+
+#define	S_TIMERVALUE0    16
+#define	M_TIMERVALUE0    0xffffU
+#define	V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0)
+#define	G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0)
+
+#define	S_TIMERVALUE1    0
+#define	M_TIMERVALUE1    0xffffU
+#define	V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1)
+#define	G_TIMERVALUE1(x) (((x) >> S_TIMERVALUE1) & M_TIMERVALUE1)
+
+#define	A_SGE_TIMER_VALUE_2_AND_3 0x10bc
+
+#define	S_TIMERVALUE2    16
+#define	M_TIMERVALUE2    0xffffU
+#define	V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2)
+#define	G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2)
+
+#define	S_TIMERVALUE3    0
+#define	M_TIMERVALUE3    0xffffU
+#define	V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3)
+#define	G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3)
+
+#define	A_SGE_TIMER_VALUE_4_AND_5 0x10c0
+
+#define	S_TIMERVALUE4    16
+#define	M_TIMERVALUE4    0xffffU
+#define	V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4)
+#define	G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4)
+
+#define	S_TIMERVALUE5    0
+#define	M_TIMERVALUE5    0xffffU
+#define	V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5)
+#define	G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5)
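+
+/*
+ * Usage sketch (illustrative only): each of the three registers above
+ * packs two 16-bit holdoff timer values, so timers 0 and 1 could be
+ * fetched as:
+ *
+ *	v = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1);
+ *	tv0 = G_TIMERVALUE0(v);
+ *	tv1 = G_TIMERVALUE1(v);
+ */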
+
+#define	A_SGE_PD_RSP_CREDIT01 0x10c4
+
+#define	S_RSPCREDITEN0    31
+#define	V_RSPCREDITEN0(x) ((x) << S_RSPCREDITEN0)
+#define	F_RSPCREDITEN0    V_RSPCREDITEN0(1U)
+
+#define	S_MAXTAG0    24
+#define	M_MAXTAG0    0x7fU
+#define	V_MAXTAG0(x) ((x) << S_MAXTAG0)
+#define	G_MAXTAG0(x) (((x) >> S_MAXTAG0) & M_MAXTAG0)
+
+#define	S_MAXRSPCNT0    16
+#define	M_MAXRSPCNT0    0xffU
+#define	V_MAXRSPCNT0(x) ((x) << S_MAXRSPCNT0)
+#define	G_MAXRSPCNT0(x) (((x) >> S_MAXRSPCNT0) & M_MAXRSPCNT0)
+
+#define	S_RSPCREDITEN1    15
+#define	V_RSPCREDITEN1(x) ((x) << S_RSPCREDITEN1)
+#define	F_RSPCREDITEN1    V_RSPCREDITEN1(1U)
+
+#define	S_MAXTAG1    8
+#define	M_MAXTAG1    0x7fU
+#define	V_MAXTAG1(x) ((x) << S_MAXTAG1)
+#define	G_MAXTAG1(x) (((x) >> S_MAXTAG1) & M_MAXTAG1)
+
+#define	S_MAXRSPCNT1    0
+#define	M_MAXRSPCNT1    0xffU
+#define	V_MAXRSPCNT1(x) ((x) << S_MAXRSPCNT1)
+#define	G_MAXRSPCNT1(x) (((x) >> S_MAXRSPCNT1) & M_MAXRSPCNT1)
+
+#define	A_SGE_PD_RSP_CREDIT23 0x10c8
+
+#define	S_RSPCREDITEN2    31
+#define	V_RSPCREDITEN2(x) ((x) << S_RSPCREDITEN2)
+#define	F_RSPCREDITEN2    V_RSPCREDITEN2(1U)
+
+#define	S_MAXTAG2    24
+#define	M_MAXTAG2    0x7fU
+#define	V_MAXTAG2(x) ((x) << S_MAXTAG2)
+#define	G_MAXTAG2(x) (((x) >> S_MAXTAG2) & M_MAXTAG2)
+
+#define	S_MAXRSPCNT2    16
+#define	M_MAXRSPCNT2    0xffU
+#define	V_MAXRSPCNT2(x) ((x) << S_MAXRSPCNT2)
+#define	G_MAXRSPCNT2(x) (((x) >> S_MAXRSPCNT2) & M_MAXRSPCNT2)
+
+#define	S_RSPCREDITEN3    15
+#define	V_RSPCREDITEN3(x) ((x) << S_RSPCREDITEN3)
+#define	F_RSPCREDITEN3    V_RSPCREDITEN3(1U)
+
+#define	S_MAXTAG3    8
+#define	M_MAXTAG3    0x7fU
+#define	V_MAXTAG3(x) ((x) << S_MAXTAG3)
+#define	G_MAXTAG3(x) (((x) >> S_MAXTAG3) & M_MAXTAG3)
+
+#define	S_MAXRSPCNT3    0
+#define	M_MAXRSPCNT3    0xffU
+#define	V_MAXRSPCNT3(x) ((x) << S_MAXRSPCNT3)
+#define	G_MAXRSPCNT3(x) (((x) >> S_MAXRSPCNT3) & M_MAXRSPCNT3)
+
+#define	A_SGE_DEBUG_INDEX 0x10cc
+#define	A_SGE_DEBUG_DATA_HIGH 0x10d0
+#define	A_SGE_DEBUG_DATA_LOW 0x10d4
+#define	A_SGE_REVISION 0x10d8
+#define	A_SGE_INT_CAUSE4 0x10dc
+
+#define	S_ERR_BAD_UPFL_INC_CREDIT3    8
+#define	V_ERR_BAD_UPFL_INC_CREDIT3(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT3)
+#define	F_ERR_BAD_UPFL_INC_CREDIT3    V_ERR_BAD_UPFL_INC_CREDIT3(1U)
+
+#define	S_ERR_BAD_UPFL_INC_CREDIT2    7
+#define	V_ERR_BAD_UPFL_INC_CREDIT2(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT2)
+#define	F_ERR_BAD_UPFL_INC_CREDIT2    V_ERR_BAD_UPFL_INC_CREDIT2(1U)
+
+#define	S_ERR_BAD_UPFL_INC_CREDIT1    6
+#define	V_ERR_BAD_UPFL_INC_CREDIT1(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT1)
+#define	F_ERR_BAD_UPFL_INC_CREDIT1    V_ERR_BAD_UPFL_INC_CREDIT1(1U)
+
+#define	S_ERR_BAD_UPFL_INC_CREDIT0    5
+#define	V_ERR_BAD_UPFL_INC_CREDIT0(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT0)
+#define	F_ERR_BAD_UPFL_INC_CREDIT0    V_ERR_BAD_UPFL_INC_CREDIT0(1U)
+
+#define	S_ERR_PHYSADDR_LEN0_IDMA1    4
+#define	V_ERR_PHYSADDR_LEN0_IDMA1(x) ((x) << S_ERR_PHYSADDR_LEN0_IDMA1)
+#define	F_ERR_PHYSADDR_LEN0_IDMA1    V_ERR_PHYSADDR_LEN0_IDMA1(1U)
+
+#define	S_ERR_PHYSADDR_LEN0_IDMA0    3
+#define	V_ERR_PHYSADDR_LEN0_IDMA0(x) ((x) << S_ERR_PHYSADDR_LEN0_IDMA0)
+#define	F_ERR_PHYSADDR_LEN0_IDMA0    V_ERR_PHYSADDR_LEN0_IDMA0(1U)
+
+#define	S_ERR_FLM_INVALID_PKT_DROP1    2
+#define	V_ERR_FLM_INVALID_PKT_DROP1(x) ((x) << S_ERR_FLM_INVALID_PKT_DROP1)
+#define	F_ERR_FLM_INVALID_PKT_DROP1    V_ERR_FLM_INVALID_PKT_DROP1(1U)
+
+#define	S_ERR_FLM_INVALID_PKT_DROP0    1
+#define	V_ERR_FLM_INVALID_PKT_DROP0(x) ((x) << S_ERR_FLM_INVALID_PKT_DROP0)
+#define	F_ERR_FLM_INVALID_PKT_DROP0    V_ERR_FLM_INVALID_PKT_DROP0(1U)
+
+#define	S_ERR_UNEXPECTED_TIMER    0
+#define	V_ERR_UNEXPECTED_TIMER(x) ((x) << S_ERR_UNEXPECTED_TIMER)
+#define	F_ERR_UNEXPECTED_TIMER    V_ERR_UNEXPECTED_TIMER(1U)
+
+#define	A_SGE_INT_ENABLE4 0x10e0
+#define	A_SGE_STAT_TOTAL 0x10e4
+#define	A_SGE_STAT_MATCH 0x10e8
+#define	A_SGE_STAT_CFG 0x10ec
+
+#define	S_ITPOPMODE    8
+#define	V_ITPOPMODE(x) ((x) << S_ITPOPMODE)
+#define	F_ITPOPMODE    V_ITPOPMODE(1U)
+
+#define	S_EGRCTXTOPMODE    6
+#define	M_EGRCTXTOPMODE    0x3U
+#define	V_EGRCTXTOPMODE(x) ((x) << S_EGRCTXTOPMODE)
+#define	G_EGRCTXTOPMODE(x) (((x) >> S_EGRCTXTOPMODE) & M_EGRCTXTOPMODE)
+
+#define	S_INGCTXTOPMODE    4
+#define	M_INGCTXTOPMODE    0x3U
+#define	V_INGCTXTOPMODE(x) ((x) << S_INGCTXTOPMODE)
+#define	G_INGCTXTOPMODE(x) (((x) >> S_INGCTXTOPMODE) & M_INGCTXTOPMODE)
+
+#define	S_STATMODE    2
+#define	M_STATMODE    0x3U
+#define	V_STATMODE(x) ((x) << S_STATMODE)
+#define	G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE)
+
+#define	S_STATSOURCE    0
+#define	M_STATSOURCE    0x3U
+#define	V_STATSOURCE(x) ((x) << S_STATSOURCE)
+#define	G_STATSOURCE(x) (((x) >> S_STATSOURCE) & M_STATSOURCE)
+
+#define	A_SGE_HINT_CFG 0x10f0
+
+#define	S_HINTSALLOWEDNOHDR    6
+#define	M_HINTSALLOWEDNOHDR    0x3fU
+#define	V_HINTSALLOWEDNOHDR(x) ((x) << S_HINTSALLOWEDNOHDR)
+#define	G_HINTSALLOWEDNOHDR(x) \
+	(((x) >> S_HINTSALLOWEDNOHDR) & M_HINTSALLOWEDNOHDR)
+
+#define	S_HINTSALLOWEDHDR    0
+#define	M_HINTSALLOWEDHDR    0x3fU
+#define	V_HINTSALLOWEDHDR(x) ((x) << S_HINTSALLOWEDHDR)
+#define	G_HINTSALLOWEDHDR(x) (((x) >> S_HINTSALLOWEDHDR) & M_HINTSALLOWEDHDR)
+
+#define	A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define	A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
+#define	A_SGE_PD_WRR_CONFIG 0x10fc
+
+#define	S_EDMA_WEIGHT    0
+#define	M_EDMA_WEIGHT    0x3fU
+#define	V_EDMA_WEIGHT(x) ((x) << S_EDMA_WEIGHT)
+#define	G_EDMA_WEIGHT(x) (((x) >> S_EDMA_WEIGHT) & M_EDMA_WEIGHT)
+
+#define	A_SGE_ERROR_STATS 0x1100
+
+#define	S_UNCAPTURED_ERROR    18
+#define	V_UNCAPTURED_ERROR(x) ((x) << S_UNCAPTURED_ERROR)
+#define	F_UNCAPTURED_ERROR    V_UNCAPTURED_ERROR(1U)
+
+#define	S_ERROR_QID_VALID    17
+#define	V_ERROR_QID_VALID(x) ((x) << S_ERROR_QID_VALID)
+#define	F_ERROR_QID_VALID    V_ERROR_QID_VALID(1U)
+
+#define	S_ERROR_QID    0
+#define	M_ERROR_QID    0x1ffffU
+#define	V_ERROR_QID(x) ((x) << S_ERROR_QID)
+#define	G_ERROR_QID(x) (((x) >> S_ERROR_QID) & M_ERROR_QID)
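+
+/*
+ * Usage sketch (illustrative only): the F_ macros test single-bit
+ * fields, so the queue behind a captured SGE error could be recovered
+ * as:
+ *
+ *	v = t4_read_reg(sc, A_SGE_ERROR_STATS);
+ *	if (v & F_ERROR_QID_VALID)
+ *		qid = G_ERROR_QID(v);
+ */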
+
+#define	A_SGE_SHARED_TAG_CHAN_CFG 0x1104
+
+#define	S_MINTAG3    24
+#define	M_MINTAG3    0xffU
+#define	V_MINTAG3(x) ((x) << S_MINTAG3)
+#define	G_MINTAG3(x) (((x) >> S_MINTAG3) & M_MINTAG3)
+
+#define	S_MINTAG2    16
+#define	M_MINTAG2    0xffU
+#define	V_MINTAG2(x) ((x) << S_MINTAG2)
+#define	G_MINTAG2(x) (((x) >> S_MINTAG2) & M_MINTAG2)
+
+#define	S_MINTAG1    8
+#define	M_MINTAG1    0xffU
+#define	V_MINTAG1(x) ((x) << S_MINTAG1)
+#define	G_MINTAG1(x) (((x) >> S_MINTAG1) & M_MINTAG1)
+
+#define	S_MINTAG0    0
+#define	M_MINTAG0    0xffU
+#define	V_MINTAG0(x) ((x) << S_MINTAG0)
+#define	G_MINTAG0(x) (((x) >> S_MINTAG0) & M_MINTAG0)
+
+#define	A_SGE_SHARED_TAG_POOL_CFG 0x1108
+
+#define	S_TAGPOOLTOTAL    0
+#define	M_TAGPOOLTOTAL    0xffU
+#define	V_TAGPOOLTOTAL(x) ((x) << S_TAGPOOLTOTAL)
+#define	G_TAGPOOLTOTAL(x) (((x) >> S_TAGPOOLTOTAL) & M_TAGPOOLTOTAL)
+
+#define	A_SGE_PC0_REQ_BIST_CMD 0x1180
+#define	A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184
+#define	A_SGE_PC1_REQ_BIST_CMD 0x1190
+#define	A_SGE_PC1_REQ_BIST_ERROR_CNT 0x1194
+#define	A_SGE_PC0_RSP_BIST_CMD 0x11a0
+#define	A_SGE_PC0_RSP_BIST_ERROR_CNT 0x11a4
+#define	A_SGE_PC1_RSP_BIST_CMD 0x11b0
+#define	A_SGE_PC1_RSP_BIST_ERROR_CNT 0x11b4
+#define	A_SGE_CTXT_CMD 0x11fc
+
+#define	S_BUSY    31
+#define	V_BUSY(x) ((x) << S_BUSY)
+#define	F_BUSY    V_BUSY(1U)
+
+#define	S_CTXTOP    28
+#define	M_CTXTOP    0x3U
+#define	V_CTXTOP(x) ((x) << S_CTXTOP)
+#define	G_CTXTOP(x) (((x) >> S_CTXTOP) & M_CTXTOP)
+
+#define	S_CTXTTYPE    24
+#define	M_CTXTTYPE    0x3U
+#define	V_CTXTTYPE(x) ((x) << S_CTXTTYPE)
+#define	G_CTXTTYPE(x) (((x) >> S_CTXTTYPE) & M_CTXTTYPE)
+
+#define	S_CTXTQID    0
+#define	M_CTXTQID    0x1ffffU
+#define	V_CTXTQID(x) ((x) << S_CTXTQID)
+#define	G_CTXTQID(x) (((x) >> S_CTXTQID) & M_CTXTQID)
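+
+/*
+ * Usage sketch (illustrative only): a context read is kicked off by
+ * writing the queue id and context type to A_SGE_CTXT_CMD, polling
+ * F_BUSY until the hardware finishes, then collecting the
+ * A_SGE_CTXT_DATA0-7 words defined below:
+ *
+ *	t4_write_reg(sc, A_SGE_CTXT_CMD,
+ *	    V_CTXTQID(qid) | V_CTXTTYPE(ctype));
+ *	while (t4_read_reg(sc, A_SGE_CTXT_CMD) & F_BUSY)
+ *		;	(poll, with a timeout in real code)
+ *	data0 = t4_read_reg(sc, A_SGE_CTXT_DATA0);
+ */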
+
+#define	A_SGE_CTXT_DATA0 0x1200
+#define	A_SGE_CTXT_DATA1 0x1204
+#define	A_SGE_CTXT_DATA2 0x1208
+#define	A_SGE_CTXT_DATA3 0x120c
+#define	A_SGE_CTXT_DATA4 0x1210
+#define	A_SGE_CTXT_DATA5 0x1214
+#define	A_SGE_CTXT_DATA6 0x1218
+#define	A_SGE_CTXT_DATA7 0x121c
+#define	A_SGE_CTXT_MASK0 0x1220
+#define	A_SGE_CTXT_MASK1 0x1224
+#define	A_SGE_CTXT_MASK2 0x1228
+#define	A_SGE_CTXT_MASK3 0x122c
+#define	A_SGE_CTXT_MASK4 0x1230
+#define	A_SGE_CTXT_MASK5 0x1234
+#define	A_SGE_CTXT_MASK6 0x1238
+#define	A_SGE_CTXT_MASK7 0x123c
+#define	A_SGE_QUEUE_BASE_MAP_HIGH 0x1300
+
+#define	S_EGRESS_LOG2SIZE    27
+#define	M_EGRESS_LOG2SIZE    0x1fU
+#define	V_EGRESS_LOG2SIZE(x) ((x) << S_EGRESS_LOG2SIZE)
+#define	G_EGRESS_LOG2SIZE(x) (((x) >> S_EGRESS_LOG2SIZE) & M_EGRESS_LOG2SIZE)
+
+#define	S_EGRESS_BASE    10
+#define	M_EGRESS_BASE    0x1ffffU
+#define	V_EGRESS_BASE(x) ((x) << S_EGRESS_BASE)
+#define	G_EGRESS_BASE(x) (((x) >> S_EGRESS_BASE) & M_EGRESS_BASE)
+
+#define	S_INGRESS2_LOG2SIZE    5
+#define	M_INGRESS2_LOG2SIZE    0x1fU
+#define	V_INGRESS2_LOG2SIZE(x) ((x) << S_INGRESS2_LOG2SIZE)
+#define	G_INGRESS2_LOG2SIZE(x) \
+	(((x) >> S_INGRESS2_LOG2SIZE) & M_INGRESS2_LOG2SIZE)
+
+#define	S_INGRESS1_LOG2SIZE    0
+#define	M_INGRESS1_LOG2SIZE    0x1fU
+#define	V_INGRESS1_LOG2SIZE(x) ((x) << S_INGRESS1_LOG2SIZE)
+#define	G_INGRESS1_LOG2SIZE(x) \
+	(((x) >> S_INGRESS1_LOG2SIZE) & M_INGRESS1_LOG2SIZE)
+
+#define	A_SGE_QUEUE_BASE_MAP_LOW 0x1304
+
+#define	S_INGRESS2_BASE    16
+#define	M_INGRESS2_BASE    0xffffU
+#define	V_INGRESS2_BASE(x) ((x) << S_INGRESS2_BASE)
+#define	G_INGRESS2_BASE(x) (((x) >> S_INGRESS2_BASE) & M_INGRESS2_BASE)
+
+#define	S_INGRESS1_BASE    0
+#define	M_INGRESS1_BASE    0xffffU
+#define	V_INGRESS1_BASE(x) ((x) << S_INGRESS1_BASE)
+#define	G_INGRESS1_BASE(x) (((x) >> S_INGRESS1_BASE) & M_INGRESS1_BASE)
+
+#define	A_SGE_LA_RDPTR_0 0x1800
+#define	A_SGE_LA_RDDATA_0 0x1804
+#define	A_SGE_LA_WRPTR_0 0x1808
+#define	A_SGE_LA_RESERVED_0 0x180c
+#define	A_SGE_LA_RDPTR_1 0x1810
+#define	A_SGE_LA_RDDATA_1 0x1814
+#define	A_SGE_LA_WRPTR_1 0x1818
+#define	A_SGE_LA_RESERVED_1 0x181c
+#define	A_SGE_LA_RDPTR_2 0x1820
+#define	A_SGE_LA_RDDATA_2 0x1824
+#define	A_SGE_LA_WRPTR_2 0x1828
+#define	A_SGE_LA_RESERVED_2 0x182c
+#define	A_SGE_LA_RDPTR_3 0x1830
+#define	A_SGE_LA_RDDATA_3 0x1834
+#define	A_SGE_LA_WRPTR_3 0x1838
+#define	A_SGE_LA_RESERVED_3 0x183c
+#define	A_SGE_LA_RDPTR_4 0x1840
+#define	A_SGE_LA_RDDATA_4 0x1844
+#define	A_SGE_LA_WRPTR_4 0x1848
+#define	A_SGE_LA_RESERVED_4 0x184c
+#define	A_SGE_LA_RDPTR_5 0x1850
+#define	A_SGE_LA_RDDATA_5 0x1854
+#define	A_SGE_LA_WRPTR_5 0x1858
+#define	A_SGE_LA_RESERVED_5 0x185c
+#define	A_SGE_LA_RDPTR_6 0x1860
+#define	A_SGE_LA_RDDATA_6 0x1864
+#define	A_SGE_LA_WRPTR_6 0x1868
+#define	A_SGE_LA_RESERVED_6 0x186c
+#define	A_SGE_LA_RDPTR_7 0x1870
+#define	A_SGE_LA_RDDATA_7 0x1874
+#define	A_SGE_LA_WRPTR_7 0x1878
+#define	A_SGE_LA_RESERVED_7 0x187c
+#define	A_SGE_LA_RDPTR_8 0x1880
+#define	A_SGE_LA_RDDATA_8 0x1884
+#define	A_SGE_LA_WRPTR_8 0x1888
+#define	A_SGE_LA_RESERVED_8 0x188c
+#define	A_SGE_LA_RDPTR_9 0x1890
+#define	A_SGE_LA_RDDATA_9 0x1894
+#define	A_SGE_LA_WRPTR_9 0x1898
+#define	A_SGE_LA_RESERVED_9 0x189c
+#define	A_SGE_LA_RDPTR_10 0x18a0
+#define	A_SGE_LA_RDDATA_10 0x18a4
+#define	A_SGE_LA_WRPTR_10 0x18a8
+#define	A_SGE_LA_RESERVED_10 0x18ac
+#define	A_SGE_LA_RDPTR_11 0x18b0
+#define	A_SGE_LA_RDDATA_11 0x18b4
+#define	A_SGE_LA_WRPTR_11 0x18b8
+#define	A_SGE_LA_RESERVED_11 0x18bc
+#define	A_SGE_LA_RDPTR_12 0x18c0
+#define	A_SGE_LA_RDDATA_12 0x18c4
+#define	A_SGE_LA_WRPTR_12 0x18c8
+#define	A_SGE_LA_RESERVED_12 0x18cc
+#define	A_SGE_LA_RDPTR_13 0x18d0
+#define	A_SGE_LA_RDDATA_13 0x18d4
+#define	A_SGE_LA_WRPTR_13 0x18d8
+#define	A_SGE_LA_RESERVED_13 0x18dc
+#define	A_SGE_LA_RDPTR_14 0x18e0
+#define	A_SGE_LA_RDDATA_14 0x18e4
+#define	A_SGE_LA_WRPTR_14 0x18e8
+#define	A_SGE_LA_RESERVED_14 0x18ec
+#define	A_SGE_LA_RDPTR_15 0x18f0
+#define	A_SGE_LA_RDDATA_15 0x18f4
+#define	A_SGE_LA_WRPTR_15 0x18f8
+#define	A_SGE_LA_RESERVED_15 0x18fc
+
+/* registers for module PCIE */
+#define	PCIE_BASE_ADDR 0x3000
+
+#define	A_PCIE_PF_CFG 0x40
+
+#define	S_INTXSTAT    16
+#define	V_INTXSTAT(x) ((x) << S_INTXSTAT)
+#define	F_INTXSTAT    V_INTXSTAT(1U)
+
+#define	S_AUXPWRPMEN    15
+#define	V_AUXPWRPMEN(x) ((x) << S_AUXPWRPMEN)
+#define	F_AUXPWRPMEN    V_AUXPWRPMEN(1U)
+
+#define	S_NOSOFTRESET    14
+#define	V_NOSOFTRESET(x) ((x) << S_NOSOFTRESET)
+#define	F_NOSOFTRESET    V_NOSOFTRESET(1U)
+
+#define	S_AIVEC    4
+#define	M_AIVEC    0x3ffU
+#define	V_AIVEC(x) ((x) << S_AIVEC)
+#define	G_AIVEC(x) (((x) >> S_AIVEC) & M_AIVEC)
+
+#define	S_INTXTYPE    2
+#define	M_INTXTYPE    0x3U
+#define	V_INTXTYPE(x) ((x) << S_INTXTYPE)
+#define	G_INTXTYPE(x) (((x) >> S_INTXTYPE) & M_INTXTYPE)
+
+#define	S_D3HOTEN    1
+#define	V_D3HOTEN(x) ((x) << S_D3HOTEN)
+#define	F_D3HOTEN    V_D3HOTEN(1U)
+
+#define	S_CLIDECEN    0
+#define	V_CLIDECEN(x) ((x) << S_CLIDECEN)
+#define	F_CLIDECEN    V_CLIDECEN(1U)
+
+#define	A_PCIE_PF_CLI 0x44
+#define	A_PCIE_PF_GEN_MSG 0x48
+
+#define	S_MSGTYPE    0
+#define	M_MSGTYPE    0xffU
+#define	V_MSGTYPE(x) ((x) << S_MSGTYPE)
+#define	G_MSGTYPE(x) (((x) >> S_MSGTYPE) & M_MSGTYPE)
+
+#define	A_PCIE_PF_EXPROM_OFST 0x4c
+
+#define	S_OFFSET    10
+#define	M_OFFSET    0x3fffU
+#define	V_OFFSET(x) ((x) << S_OFFSET)
+#define	G_OFFSET(x) (((x) >> S_OFFSET) & M_OFFSET)
+
+#define	A_PCIE_INT_ENABLE 0x3000
+
+#define	S_NONFATALERR    30
+#define	V_NONFATALERR(x) ((x) << S_NONFATALERR)
+#define	F_NONFATALERR    V_NONFATALERR(1U)
+
+#define	S_UNXSPLCPLERR    29
+#define	V_UNXSPLCPLERR(x) ((x) << S_UNXSPLCPLERR)
+#define	F_UNXSPLCPLERR    V_UNXSPLCPLERR(1U)
+
+#define	S_PCIEPINT    28
+#define	V_PCIEPINT(x) ((x) << S_PCIEPINT)
+#define	F_PCIEPINT    V_PCIEPINT(1U)
+
+#define	S_PCIESINT    27
+#define	V_PCIESINT(x) ((x) << S_PCIESINT)
+#define	F_PCIESINT    V_PCIESINT(1U)
+
+#define	S_RPLPERR    26
+#define	V_RPLPERR(x) ((x) << S_RPLPERR)
+#define	F_RPLPERR    V_RPLPERR(1U)
+
+#define	S_RXWRPERR    25
+#define	V_RXWRPERR(x) ((x) << S_RXWRPERR)
+#define	F_RXWRPERR    V_RXWRPERR(1U)
+
+#define	S_RXCPLPERR    24
+#define	V_RXCPLPERR(x) ((x) << S_RXCPLPERR)
+#define	F_RXCPLPERR    V_RXCPLPERR(1U)
+
+#define	S_PIOTAGPERR    23
+#define	V_PIOTAGPERR(x) ((x) << S_PIOTAGPERR)
+#define	F_PIOTAGPERR    V_PIOTAGPERR(1U)
+
+#define	S_MATAGPERR    22
+#define	V_MATAGPERR(x) ((x) << S_MATAGPERR)
+#define	F_MATAGPERR    V_MATAGPERR(1U)
+
+#define	S_INTXCLRPERR    21
+#define	V_INTXCLRPERR(x) ((x) << S_INTXCLRPERR)
+#define	F_INTXCLRPERR    V_INTXCLRPERR(1U)
+
+#define	S_FIDPERR    20
+#define	V_FIDPERR(x) ((x) << S_FIDPERR)
+#define	F_FIDPERR    V_FIDPERR(1U)
+
+#define	S_CFGSNPPERR    19
+#define	V_CFGSNPPERR(x) ((x) << S_CFGSNPPERR)
+#define	F_CFGSNPPERR    V_CFGSNPPERR(1U)
+
+#define	S_HRSPPERR    18
+#define	V_HRSPPERR(x) ((x) << S_HRSPPERR)
+#define	F_HRSPPERR    V_HRSPPERR(1U)
+
+#define	S_HREQPERR    17
+#define	V_HREQPERR(x) ((x) << S_HREQPERR)
+#define	F_HREQPERR    V_HREQPERR(1U)
+
+#define	S_HCNTPERR    16
+#define	V_HCNTPERR(x) ((x) << S_HCNTPERR)
+#define	F_HCNTPERR    V_HCNTPERR(1U)
+
+#define	S_DRSPPERR    15
+#define	V_DRSPPERR(x) ((x) << S_DRSPPERR)
+#define	F_DRSPPERR    V_DRSPPERR(1U)
+
+#define	S_DREQPERR    14
+#define	V_DREQPERR(x) ((x) << S_DREQPERR)
+#define	F_DREQPERR    V_DREQPERR(1U)
+
+#define	S_DCNTPERR    13
+#define	V_DCNTPERR(x) ((x) << S_DCNTPERR)
+#define	F_DCNTPERR    V_DCNTPERR(1U)
+
+#define	S_CRSPPERR    12
+#define	V_CRSPPERR(x) ((x) << S_CRSPPERR)
+#define	F_CRSPPERR    V_CRSPPERR(1U)
+
+#define	S_CREQPERR    11
+#define	V_CREQPERR(x) ((x) << S_CREQPERR)
+#define	F_CREQPERR    V_CREQPERR(1U)
+
+#define	S_CCNTPERR    10
+#define	V_CCNTPERR(x) ((x) << S_CCNTPERR)
+#define	F_CCNTPERR    V_CCNTPERR(1U)
+
+#define	S_TARTAGPERR    9
+#define	V_TARTAGPERR(x) ((x) << S_TARTAGPERR)
+#define	F_TARTAGPERR    V_TARTAGPERR(1U)
+
+#define	S_PIOREQPERR    8
+#define	V_PIOREQPERR(x) ((x) << S_PIOREQPERR)
+#define	F_PIOREQPERR    V_PIOREQPERR(1U)
+
+#define	S_PIOCPLPERR    7
+#define	V_PIOCPLPERR(x) ((x) << S_PIOCPLPERR)
+#define	F_PIOCPLPERR    V_PIOCPLPERR(1U)
+
+#define	S_MSIXDIPERR    6
+#define	V_MSIXDIPERR(x) ((x) << S_MSIXDIPERR)
+#define	F_MSIXDIPERR    V_MSIXDIPERR(1U)
+
+#define	S_MSIXDATAPERR    5
+#define	V_MSIXDATAPERR(x) ((x) << S_MSIXDATAPERR)
+#define	F_MSIXDATAPERR    V_MSIXDATAPERR(1U)
+
+#define	S_MSIXADDRHPERR    4
+#define	V_MSIXADDRHPERR(x) ((x) << S_MSIXADDRHPERR)
+#define	F_MSIXADDRHPERR    V_MSIXADDRHPERR(1U)
+
+#define	S_MSIXADDRLPERR    3
+#define	V_MSIXADDRLPERR(x) ((x) << S_MSIXADDRLPERR)
+#define	F_MSIXADDRLPERR    V_MSIXADDRLPERR(1U)
+
+#define	S_MSIDATAPERR    2
+#define	V_MSIDATAPERR(x) ((x) << S_MSIDATAPERR)
+#define	F_MSIDATAPERR    V_MSIDATAPERR(1U)
+
+#define	S_MSIADDRHPERR    1
+#define	V_MSIADDRHPERR(x) ((x) << S_MSIADDRHPERR)
+#define	F_MSIADDRHPERR    V_MSIADDRHPERR(1U)
+
+#define	S_MSIADDRLPERR    0
+#define	V_MSIADDRLPERR(x) ((x) << S_MSIADDRLPERR)
+#define	F_MSIADDRLPERR    V_MSIADDRLPERR(1U)
+
+#define	A_PCIE_INT_CAUSE 0x3004
+#define	A_PCIE_PERR_ENABLE 0x3008
+#define	A_PCIE_PERR_INJECT 0x300c
+
+#define	S_IDE    0
+#define	V_IDE(x) ((x) << S_IDE)
+#define	F_IDE    V_IDE(1U)
+
+#define	A_PCIE_NONFAT_ERR 0x3010
+
+#define	S_RDRSPERR    9
+#define	V_RDRSPERR(x) ((x) << S_RDRSPERR)
+#define	F_RDRSPERR    V_RDRSPERR(1U)
+
+#define	S_VPDRSPERR    8
+#define	V_VPDRSPERR(x) ((x) << S_VPDRSPERR)
+#define	F_VPDRSPERR    V_VPDRSPERR(1U)
+
+#define	S_POPD    7
+#define	V_POPD(x) ((x) << S_POPD)
+#define	F_POPD    V_POPD(1U)
+
+#define	S_POPH    6
+#define	V_POPH(x) ((x) << S_POPH)
+#define	F_POPH    V_POPH(1U)
+
+#define	S_POPC    5
+#define	V_POPC(x) ((x) << S_POPC)
+#define	F_POPC    V_POPC(1U)
+
+#define	S_MEMREQ    4
+#define	V_MEMREQ(x) ((x) << S_MEMREQ)
+#define	F_MEMREQ    V_MEMREQ(1U)
+
+#define	S_PIOREQ    3
+#define	V_PIOREQ(x) ((x) << S_PIOREQ)
+#define	F_PIOREQ    V_PIOREQ(1U)
+
+#define	S_TAGDROP    2
+#define	V_TAGDROP(x) ((x) << S_TAGDROP)
+#define	F_TAGDROP    V_TAGDROP(1U)
+
+#define	S_TAGCPL    1
+#define	V_TAGCPL(x) ((x) << S_TAGCPL)
+#define	F_TAGCPL    V_TAGCPL(1U)
+
+#define	S_CFGSNP    0
+#define	V_CFGSNP(x) ((x) << S_CFGSNP)
+#define	F_CFGSNP    V_CFGSNP(1U)
+
+#define	A_PCIE_CFG 0x3014
+
+#define	S_CFGDMAXPYLDSZRX    26
+#define	M_CFGDMAXPYLDSZRX    0x7U
+#define	V_CFGDMAXPYLDSZRX(x) ((x) << S_CFGDMAXPYLDSZRX)
+#define	G_CFGDMAXPYLDSZRX(x) (((x) >> S_CFGDMAXPYLDSZRX) & M_CFGDMAXPYLDSZRX)
+
+#define	S_CFGDMAXPYLDSZTX    23
+#define	M_CFGDMAXPYLDSZTX    0x7U
+#define	V_CFGDMAXPYLDSZTX(x) ((x) << S_CFGDMAXPYLDSZTX)
+#define	G_CFGDMAXPYLDSZTX(x) (((x) >> S_CFGDMAXPYLDSZTX) & M_CFGDMAXPYLDSZTX)
+
+#define	S_CFGDMAXRDREQSZ    20
+#define	M_CFGDMAXRDREQSZ    0x7U
+#define	V_CFGDMAXRDREQSZ(x) ((x) << S_CFGDMAXRDREQSZ)
+#define	G_CFGDMAXRDREQSZ(x) (((x) >> S_CFGDMAXRDREQSZ) & M_CFGDMAXRDREQSZ)
+
+#define	S_MASYNCEN    19
+#define	V_MASYNCEN(x) ((x) << S_MASYNCEN)
+#define	F_MASYNCEN    V_MASYNCEN(1U)
+
+#define	S_DCAENDMA    18
+#define	V_DCAENDMA(x) ((x) << S_DCAENDMA)
+#define	F_DCAENDMA    V_DCAENDMA(1U)
+
+#define	S_DCAENCMD    17
+#define	V_DCAENCMD(x) ((x) << S_DCAENCMD)
+#define	F_DCAENCMD    V_DCAENCMD(1U)
+
+#define	S_VFMSIPNDEN    16
+#define	V_VFMSIPNDEN(x) ((x) << S_VFMSIPNDEN)
+#define	F_VFMSIPNDEN    V_VFMSIPNDEN(1U)
+
+#define	S_FORCETXERROR    15
+#define	V_FORCETXERROR(x) ((x) << S_FORCETXERROR)
+#define	F_FORCETXERROR    V_FORCETXERROR(1U)
+
+#define	S_VPDREQPROTECT    14
+#define	V_VPDREQPROTECT(x) ((x) << S_VPDREQPROTECT)
+#define	F_VPDREQPROTECT    V_VPDREQPROTECT(1U)
+
+#define	S_FIDTABLEINVALID    13
+#define	V_FIDTABLEINVALID(x) ((x) << S_FIDTABLEINVALID)
+#define	F_FIDTABLEINVALID    V_FIDTABLEINVALID(1U)
+
+#define	S_BYPASSMSIXCACHE    12
+#define	V_BYPASSMSIXCACHE(x) ((x) << S_BYPASSMSIXCACHE)
+#define	F_BYPASSMSIXCACHE    V_BYPASSMSIXCACHE(1U)
+
+#define	S_BYPASSMSICACHE    11
+#define	V_BYPASSMSICACHE(x) ((x) << S_BYPASSMSICACHE)
+#define	F_BYPASSMSICACHE    V_BYPASSMSICACHE(1U)
+
+#define	S_SIMSPEED    10
+#define	V_SIMSPEED(x) ((x) << S_SIMSPEED)
+#define	F_SIMSPEED    V_SIMSPEED(1U)
+
+#define	S_TC0_STAMP    9
+#define	V_TC0_STAMP(x) ((x) << S_TC0_STAMP)
+#define	F_TC0_STAMP    V_TC0_STAMP(1U)
+
+#define	S_AI_TCVAL    6
+#define	M_AI_TCVAL    0x7U
+#define	V_AI_TCVAL(x) ((x) << S_AI_TCVAL)
+#define	G_AI_TCVAL(x) (((x) >> S_AI_TCVAL) & M_AI_TCVAL)
+
+#define	S_DMASTOPEN    5
+#define	V_DMASTOPEN(x) ((x) << S_DMASTOPEN)
+#define	F_DMASTOPEN    V_DMASTOPEN(1U)
+
+#define	S_DEVSTATERSTMODE    4
+#define	V_DEVSTATERSTMODE(x) ((x) << S_DEVSTATERSTMODE)
+#define	F_DEVSTATERSTMODE    V_DEVSTATERSTMODE(1U)
+
+#define	S_HOTRSTPCIECRSTMODE    3
+#define	V_HOTRSTPCIECRSTMODE(x) ((x) << S_HOTRSTPCIECRSTMODE)
+#define	F_HOTRSTPCIECRSTMODE    V_HOTRSTPCIECRSTMODE(1U)
+
+#define	S_DLDNPCIECRSTMODE    2
+#define	V_DLDNPCIECRSTMODE(x) ((x) << S_DLDNPCIECRSTMODE)
+#define	F_DLDNPCIECRSTMODE    V_DLDNPCIECRSTMODE(1U)
+
+#define	S_DLDNPCIEPRECRSTMODE    1
+#define	V_DLDNPCIEPRECRSTMODE(x) ((x) << S_DLDNPCIEPRECRSTMODE)
+#define	F_DLDNPCIEPRECRSTMODE    V_DLDNPCIEPRECRSTMODE(1U)
+
+#define	S_LINKDNRSTEN    0
+#define	V_LINKDNRSTEN(x) ((x) << S_LINKDNRSTEN)
+#define	F_LINKDNRSTEN    V_LINKDNRSTEN(1U)
+
+#define	A_PCIE_DMA_CTRL 0x3018
+
+#define	S_LITTLEENDIAN    7
+#define	V_LITTLEENDIAN(x) ((x) << S_LITTLEENDIAN)
+#define	F_LITTLEENDIAN    V_LITTLEENDIAN(1U)
+
+#define	A_PCIE_DMA_CFG 0x301c
+
+#define	S_MAXPYLDSIZE    28
+#define	M_MAXPYLDSIZE    0x7U
+#define	V_MAXPYLDSIZE(x) ((x) << S_MAXPYLDSIZE)
+#define	G_MAXPYLDSIZE(x) (((x) >> S_MAXPYLDSIZE) & M_MAXPYLDSIZE)
+
+#define	S_MAXRDREQSIZE    25
+#define	M_MAXRDREQSIZE    0x7U
+#define	V_MAXRDREQSIZE(x) ((x) << S_MAXRDREQSIZE)
+#define	G_MAXRDREQSIZE(x) (((x) >> S_MAXRDREQSIZE) & M_MAXRDREQSIZE)
+
+#define	S_DMA_MAXRSPCNT    16
+#define	M_DMA_MAXRSPCNT    0x1ffU
+#define	V_DMA_MAXRSPCNT(x) ((x) << S_DMA_MAXRSPCNT)
+#define	G_DMA_MAXRSPCNT(x) (((x) >> S_DMA_MAXRSPCNT) & M_DMA_MAXRSPCNT)
+
+#define	S_DMA_MAXREQCNT    8
+#define	M_DMA_MAXREQCNT    0xffU
+#define	V_DMA_MAXREQCNT(x) ((x) << S_DMA_MAXREQCNT)
+#define	G_DMA_MAXREQCNT(x) (((x) >> S_DMA_MAXREQCNT) & M_DMA_MAXREQCNT)
+
+#define	S_MAXTAG    0
+#define	M_MAXTAG    0x7fU
+#define	V_MAXTAG(x) ((x) << S_MAXTAG)
+#define	G_MAXTAG(x) (((x) >> S_MAXTAG) & M_MAXTAG)
+
+#define	A_PCIE_DMA_STAT 0x3020
+
+#define	S_STATEREQ    28
+#define	M_STATEREQ    0xfU
+#define	V_STATEREQ(x) ((x) << S_STATEREQ)
+#define	G_STATEREQ(x) (((x) >> S_STATEREQ) & M_STATEREQ)
+
+#define	S_DMA_RSPCNT    16
+#define	M_DMA_RSPCNT    0xfffU
+#define	V_DMA_RSPCNT(x) ((x) << S_DMA_RSPCNT)
+#define	G_DMA_RSPCNT(x) (((x) >> S_DMA_RSPCNT) & M_DMA_RSPCNT)
+
+#define	S_STATEAREQ    13
+#define	M_STATEAREQ    0x7U
+#define	V_STATEAREQ(x) ((x) << S_STATEAREQ)
+#define	G_STATEAREQ(x) (((x) >> S_STATEAREQ) & M_STATEAREQ)
+
+#define	S_TAGFREE    12
+#define	V_TAGFREE(x) ((x) << S_TAGFREE)
+#define	F_TAGFREE    V_TAGFREE(1U)
+
+#define	S_DMA_REQCNT    0
+#define	M_DMA_REQCNT    0x7ffU
+#define	V_DMA_REQCNT(x) ((x) << S_DMA_REQCNT)
+#define	G_DMA_REQCNT(x) (((x) >> S_DMA_REQCNT) & M_DMA_REQCNT)
+
+#define	A_PCIE_CMD_CTRL 0x303c
+#define	A_PCIE_CMD_CFG 0x3040
+
+#define	S_MAXRSPCNT    16
+#define	M_MAXRSPCNT    0xfU
+#define	V_MAXRSPCNT(x) ((x) << S_MAXRSPCNT)
+#define	G_MAXRSPCNT(x) (((x) >> S_MAXRSPCNT) & M_MAXRSPCNT)
+
+#define	S_MAXREQCNT    8
+#define	M_MAXREQCNT    0x1fU
+#define	V_MAXREQCNT(x) ((x) << S_MAXREQCNT)
+#define	G_MAXREQCNT(x) (((x) >> S_MAXREQCNT) & M_MAXREQCNT)
+
+#define	A_PCIE_CMD_STAT 0x3044
+
+#define	S_RSPCNT    16
+#define	M_RSPCNT    0x7fU
+#define	V_RSPCNT(x) ((x) << S_RSPCNT)
+#define	G_RSPCNT(x) (((x) >> S_RSPCNT) & M_RSPCNT)
+
+#define	S_REQCNT    0
+#define	M_REQCNT    0xffU
+#define	V_REQCNT(x) ((x) << S_REQCNT)
+#define	G_REQCNT(x) (((x) >> S_REQCNT) & M_REQCNT)
+
+#define	A_PCIE_HMA_CTRL 0x3050
+
+#define	S_IPLTSSM    12
+#define	M_IPLTSSM    0xfU
+#define	V_IPLTSSM(x) ((x) << S_IPLTSSM)
+#define	G_IPLTSSM(x) (((x) >> S_IPLTSSM) & M_IPLTSSM)
+
+#define	S_IPCONFIGDOWN    8
+#define	M_IPCONFIGDOWN    0x7U
+#define	V_IPCONFIGDOWN(x) ((x) << S_IPCONFIGDOWN)
+#define	G_IPCONFIGDOWN(x) (((x) >> S_IPCONFIGDOWN) & M_IPCONFIGDOWN)
+
+#define	A_PCIE_HMA_CFG 0x3054
+
+#define	S_HMA_MAXRSPCNT    16
+#define	M_HMA_MAXRSPCNT    0x1fU
+#define	V_HMA_MAXRSPCNT(x) ((x) << S_HMA_MAXRSPCNT)
+#define	G_HMA_MAXRSPCNT(x) (((x) >> S_HMA_MAXRSPCNT) & M_HMA_MAXRSPCNT)
+
+#define	A_PCIE_HMA_STAT 0x3058
+
+#define	S_HMA_RSPCNT    16
+#define	M_HMA_RSPCNT    0xffU
+#define	V_HMA_RSPCNT(x) ((x) << S_HMA_RSPCNT)
+#define	G_HMA_RSPCNT(x) (((x) >> S_HMA_RSPCNT) & M_HMA_RSPCNT)
+
+#define	A_PCIE_PIO_FIFO_CFG 0x305c
+
+#define	S_CPLCONFIG    16
+#define	M_CPLCONFIG    0xffffU
+#define	V_CPLCONFIG(x) ((x) << S_CPLCONFIG)
+#define	G_CPLCONFIG(x) (((x) >> S_CPLCONFIG) & M_CPLCONFIG)
+
+#define	S_PIOSTOPEN    12
+#define	V_PIOSTOPEN(x) ((x) << S_PIOSTOPEN)
+#define	F_PIOSTOPEN    V_PIOSTOPEN(1U)
+
+#define	S_IPLANESWAP    11
+#define	V_IPLANESWAP(x) ((x) << S_IPLANESWAP)
+#define	F_IPLANESWAP    V_IPLANESWAP(1U)
+
+#define	S_FORCESTRICTTS1    10
+#define	V_FORCESTRICTTS1(x) ((x) << S_FORCESTRICTTS1)
+#define	F_FORCESTRICTTS1    V_FORCESTRICTTS1(1U)
+
+#define	S_FORCEPROGRESSCNT    0
+#define	M_FORCEPROGRESSCNT    0x3ffU
+#define	V_FORCEPROGRESSCNT(x) ((x) << S_FORCEPROGRESSCNT)
+#define	G_FORCEPROGRESSCNT(x) (((x) >> S_FORCEPROGRESSCNT) & M_FORCEPROGRESSCNT)
+
+#define	A_PCIE_CFG_SPACE_REQ 0x3060
+
+#define	S_ENABLE    30
+#define	V_ENABLE(x) ((x) << S_ENABLE)
+#define	F_ENABLE    V_ENABLE(1U)
+
+#define	S_AI    29
+#define	V_AI(x) ((x) << S_AI)
+#define	F_AI    V_AI(1U)
+
+#define	S_LOCALCFG    28
+#define	V_LOCALCFG(x) ((x) << S_LOCALCFG)
+#define	F_LOCALCFG    V_LOCALCFG(1U)
+
+#define	S_BUS    20
+#define	M_BUS    0xffU
+#define	V_BUS(x) ((x) << S_BUS)
+#define	G_BUS(x) (((x) >> S_BUS) & M_BUS)
+
+#define	S_DEVICE    15
+#define	M_DEVICE    0x1fU
+#define	V_DEVICE(x) ((x) << S_DEVICE)
+#define	G_DEVICE(x) (((x) >> S_DEVICE) & M_DEVICE)
+
+#define	S_FUNCTION    12
+#define	M_FUNCTION    0x7U
+#define	V_FUNCTION(x) ((x) << S_FUNCTION)
+#define	G_FUNCTION(x) (((x) >> S_FUNCTION) & M_FUNCTION)
+
+#define	S_EXTREGISTER    8
+#define	M_EXTREGISTER    0xfU
+#define	V_EXTREGISTER(x) ((x) << S_EXTREGISTER)
+#define	G_EXTREGISTER(x) (((x) >> S_EXTREGISTER) & M_EXTREGISTER)
+
+#define	S_REGISTER    0
+#define	M_REGISTER    0xffU
+#define	V_REGISTER(x) ((x) << S_REGISTER)
+#define	G_REGISTER(x) (((x) >> S_REGISTER) & M_REGISTER)
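+
+/*
+ * Usage sketch (illustrative only): a backdoor config-space read is
+ * composed from the fields above, with the result then taken from
+ * A_PCIE_CFG_SPACE_DATA defined below:
+ *
+ *	t4_write_reg(sc, A_PCIE_CFG_SPACE_REQ, F_ENABLE | F_LOCALCFG |
+ *	    V_FUNCTION(pf) | V_REGISTER(reg));
+ *	val = t4_read_reg(sc, A_PCIE_CFG_SPACE_DATA);
+ */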
+
+#define	A_PCIE_CFG_SPACE_DATA 0x3064
+#define	A_PCIE_MEM_ACCESS_BASE_WIN 0x3068
+
+#define	S_PCIEOFST    10
+#define	M_PCIEOFST    0x3fffffU
+#define	V_PCIEOFST(x) ((x) << S_PCIEOFST)
+#define	G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
+
+#define	S_BIR    8
+#define	M_BIR    0x3U
+#define	V_BIR(x) ((x) << S_BIR)
+#define	G_BIR(x) (((x) >> S_BIR) & M_BIR)
+
+#define	S_WINDOW    0
+#define	M_WINDOW    0xffU
+#define	V_WINDOW(x) ((x) << S_WINDOW)
+#define	G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW)
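+
+/*
+ * Usage sketch (illustrative only, field semantics as understood from
+ * the common Chelsio code): a PCIe memory window is described by its
+ * BAR (BIR), a 2^(WINDOW+10)-byte aperture, and a 1KB-aligned BAR
+ * offset (PCIEOFST); the window is then aimed at adapter memory via
+ * A_PCIE_MEM_ACCESS_OFFSET, e.g.:
+ *
+ *	t4_write_reg(sc, A_PCIE_MEM_ACCESS_BASE_WIN,
+ *	    V_PCIEOFST(bar_off >> 10) | V_BIR(0) |
+ *	    V_WINDOW(aperture_log2 - 10));
+ *	t4_write_reg(sc, A_PCIE_MEM_ACCESS_OFFSET, mem_addr);
+ */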
+
+#define	A_PCIE_MEM_ACCESS_OFFSET 0x306c
+#define	A_PCIE_MAILBOX_BASE_WIN 0x30a8
+
+#define	S_MBOXPCIEOFST    6
+#define	M_MBOXPCIEOFST    0x3ffffffU
+#define	V_MBOXPCIEOFST(x) ((x) << S_MBOXPCIEOFST)
+#define	G_MBOXPCIEOFST(x) (((x) >> S_MBOXPCIEOFST) & M_MBOXPCIEOFST)
+
+#define	S_MBOXBIR    4
+#define	M_MBOXBIR    0x3U
+#define	V_MBOXBIR(x) ((x) << S_MBOXBIR)
+#define	G_MBOXBIR(x) (((x) >> S_MBOXBIR) & M_MBOXBIR)
+
+#define	S_MBOXWIN    0
+#define	M_MBOXWIN    0x3U
+#define	V_MBOXWIN(x) ((x) << S_MBOXWIN)
+#define	G_MBOXWIN(x) (((x) >> S_MBOXWIN) & M_MBOXWIN)
+
+#define	A_PCIE_MAILBOX_OFFSET 0x30ac
+#define	A_PCIE_MA_CTRL 0x30b0
+
+#define	S_MA_TAGFREE    29
+#define	V_MA_TAGFREE(x) ((x) << S_MA_TAGFREE)
+#define	F_MA_TAGFREE    V_MA_TAGFREE(1U)
+
+#define	S_MA_MAXRSPCNT    24
+#define	M_MA_MAXRSPCNT    0x1fU
+#define	V_MA_MAXRSPCNT(x) ((x) << S_MA_MAXRSPCNT)
+#define	G_MA_MAXRSPCNT(x) (((x) >> S_MA_MAXRSPCNT) & M_MA_MAXRSPCNT)
+
+#define	S_MA_MAXREQCNT    16
+#define	M_MA_MAXREQCNT    0x1fU
+#define	V_MA_MAXREQCNT(x) ((x) << S_MA_MAXREQCNT)
+#define	G_MA_MAXREQCNT(x) (((x) >> S_MA_MAXREQCNT) & M_MA_MAXREQCNT)
+
+#define	S_MA_LE    15
+#define	V_MA_LE(x) ((x) << S_MA_LE)
+#define	F_MA_LE    V_MA_LE(1U)
+
+#define	S_MA_MAXPYLDSIZE    12
+#define	M_MA_MAXPYLDSIZE    0x7U
+#define	V_MA_MAXPYLDSIZE(x) ((x) << S_MA_MAXPYLDSIZE)
+#define	G_MA_MAXPYLDSIZE(x) (((x) >> S_MA_MAXPYLDSIZE) & M_MA_MAXPYLDSIZE)
+
+#define	S_MA_MAXRDREQSIZE    8
+#define	M_MA_MAXRDREQSIZE    0x7U
+#define	V_MA_MAXRDREQSIZE(x) ((x) << S_MA_MAXRDREQSIZE)
+#define	G_MA_MAXRDREQSIZE(x) (((x) >> S_MA_MAXRDREQSIZE) & M_MA_MAXRDREQSIZE)
+
+#define	S_MA_MAXTAG    0
+#define	M_MA_MAXTAG    0x1fU
+#define	V_MA_MAXTAG(x) ((x) << S_MA_MAXTAG)
+#define	G_MA_MAXTAG(x) (((x) >> S_MA_MAXTAG) & M_MA_MAXTAG)
+
+#define	A_PCIE_MA_SYNC 0x30b4
+#define	A_PCIE_FW 0x30b8
+#define	A_PCIE_FW_PF 0x30bc
+#define	A_PCIE_PIO_PAUSE 0x30dc
+
+#define	S_PIOPAUSEDONE    31
+#define	V_PIOPAUSEDONE(x) ((x) << S_PIOPAUSEDONE)
+#define	F_PIOPAUSEDONE    V_PIOPAUSEDONE(1U)
+
+#define	S_PIOPAUSETIME    4
+#define	M_PIOPAUSETIME    0xffffffU
+#define	V_PIOPAUSETIME(x) ((x) << S_PIOPAUSETIME)
+#define	G_PIOPAUSETIME(x) (((x) >> S_PIOPAUSETIME) & M_PIOPAUSETIME)
+
+#define	S_PIOPAUSE    0
+#define	V_PIOPAUSE(x) ((x) << S_PIOPAUSE)
+#define	F_PIOPAUSE    V_PIOPAUSE(1U)
+
+#define	A_PCIE_SYS_CFG_READY 0x30e0
+#define	A_PCIE_STATIC_CFG1 0x30e4
+
+#define	S_LINKDOWN_RESET_EN    26
+#define	V_LINKDOWN_RESET_EN(x) ((x) << S_LINKDOWN_RESET_EN)
+#define	F_LINKDOWN_RESET_EN    V_LINKDOWN_RESET_EN(1U)
+
+#define	S_IN_WR_DISCONTIG    25
+#define	V_IN_WR_DISCONTIG(x) ((x) << S_IN_WR_DISCONTIG)
+#define	F_IN_WR_DISCONTIG    V_IN_WR_DISCONTIG(1U)
+
+#define	S_IN_RD_CPLSIZE    22
+#define	M_IN_RD_CPLSIZE    0x7U
+#define	V_IN_RD_CPLSIZE(x) ((x) << S_IN_RD_CPLSIZE)
+#define	G_IN_RD_CPLSIZE(x) (((x) >> S_IN_RD_CPLSIZE) & M_IN_RD_CPLSIZE)
+
+#define	S_IN_RD_BUFMODE    20
+#define	M_IN_RD_BUFMODE    0x3U
+#define	V_IN_RD_BUFMODE(x) ((x) << S_IN_RD_BUFMODE)
+#define	G_IN_RD_BUFMODE(x) (((x) >> S_IN_RD_BUFMODE) & M_IN_RD_BUFMODE)
+
+#define	S_GBIF_NPTRANS_TOT    18
+#define	M_GBIF_NPTRANS_TOT    0x3U
+#define	V_GBIF_NPTRANS_TOT(x) ((x) << S_GBIF_NPTRANS_TOT)
+#define	G_GBIF_NPTRANS_TOT(x) (((x) >> S_GBIF_NPTRANS_TOT) & M_GBIF_NPTRANS_TOT)
+
+#define	S_IN_PDAT_TOT    15
+#define	M_IN_PDAT_TOT    0x7U
+#define	V_IN_PDAT_TOT(x) ((x) << S_IN_PDAT_TOT)
+#define	G_IN_PDAT_TOT(x) (((x) >> S_IN_PDAT_TOT) & M_IN_PDAT_TOT)
+
+#define	S_PCIE_NPTRANS_TOT    12
+#define	M_PCIE_NPTRANS_TOT    0x7U
+#define	V_PCIE_NPTRANS_TOT(x) ((x) << S_PCIE_NPTRANS_TOT)
+#define	G_PCIE_NPTRANS_TOT(x) (((x) >> S_PCIE_NPTRANS_TOT) & M_PCIE_NPTRANS_TOT)
+
+#define	S_OUT_PDAT_TOT    9
+#define	M_OUT_PDAT_TOT    0x7U
+#define	V_OUT_PDAT_TOT(x) ((x) << S_OUT_PDAT_TOT)
+#define	G_OUT_PDAT_TOT(x) (((x) >> S_OUT_PDAT_TOT) & M_OUT_PDAT_TOT)
+
+#define	S_GBIF_MAX_WRSIZE    6
+#define	M_GBIF_MAX_WRSIZE    0x7U
+#define	V_GBIF_MAX_WRSIZE(x) ((x) << S_GBIF_MAX_WRSIZE)
+#define	G_GBIF_MAX_WRSIZE(x) (((x) >> S_GBIF_MAX_WRSIZE) & M_GBIF_MAX_WRSIZE)
+
+#define	S_GBIF_MAX_RDSIZE    3
+#define	M_GBIF_MAX_RDSIZE    0x7U
+#define	V_GBIF_MAX_RDSIZE(x) ((x) << S_GBIF_MAX_RDSIZE)
+#define	G_GBIF_MAX_RDSIZE(x) (((x) >> S_GBIF_MAX_RDSIZE) & M_GBIF_MAX_RDSIZE)
+
+#define	S_PCIE_MAX_RDSIZE    0
+#define	M_PCIE_MAX_RDSIZE    0x7U
+#define	V_PCIE_MAX_RDSIZE(x) ((x) << S_PCIE_MAX_RDSIZE)
+#define	G_PCIE_MAX_RDSIZE(x) (((x) >> S_PCIE_MAX_RDSIZE) & M_PCIE_MAX_RDSIZE)
+
+#define	A_PCIE_DBG_INDIR_REQ 0x30ec
+
+#define	S_DBGENABLE    31
+#define	V_DBGENABLE(x) ((x) << S_DBGENABLE)
+#define	F_DBGENABLE    V_DBGENABLE(1U)
+
+#define	S_DBGAUTOINC    30
+#define	V_DBGAUTOINC(x) ((x) << S_DBGAUTOINC)
+#define	F_DBGAUTOINC    V_DBGAUTOINC(1U)
+
+#define	S_POINTER    8
+#define	M_POINTER    0xffffU
+#define	V_POINTER(x) ((x) << S_POINTER)
+#define	G_POINTER(x) (((x) >> S_POINTER) & M_POINTER)
+
+#define	S_SELECT    0
+#define	M_SELECT    0xfU
+#define	V_SELECT(x) ((x) << S_SELECT)
+#define	G_SELECT(x) (((x) >> S_SELECT) & M_SELECT)
+
+#define	A_PCIE_DBG_INDIR_DATA_0 0x30f0
+#define	A_PCIE_DBG_INDIR_DATA_1 0x30f4
+#define	A_PCIE_DBG_INDIR_DATA_2 0x30f8
+#define	A_PCIE_DBG_INDIR_DATA_3 0x30fc
+#define	A_PCIE_FUNC_INT_CFG 0x3100
+
+#define	S_PBAOFST    28
+#define	M_PBAOFST    0xfU
+#define	V_PBAOFST(x) ((x) << S_PBAOFST)
+#define	G_PBAOFST(x) (((x) >> S_PBAOFST) & M_PBAOFST)
+
+#define	S_TABOFST    24
+#define	M_TABOFST    0xfU
+#define	V_TABOFST(x) ((x) << S_TABOFST)
+#define	G_TABOFST(x) (((x) >> S_TABOFST) & M_TABOFST)
+
+#define	S_VECNUM    12
+#define	M_VECNUM    0x3ffU
+#define	V_VECNUM(x) ((x) << S_VECNUM)
+#define	G_VECNUM(x) (((x) >> S_VECNUM) & M_VECNUM)
+
+#define	S_VECBASE    0
+#define	M_VECBASE    0x7ffU
+#define	V_VECBASE(x) ((x) << S_VECBASE)
+#define	G_VECBASE(x) (((x) >> S_VECBASE) & M_VECBASE)
+
+#define	A_PCIE_FUNC_CTL_STAT 0x3104
+
+#define	S_SENDFLRRSP    31
+#define	V_SENDFLRRSP(x) ((x) << S_SENDFLRRSP)
+#define	F_SENDFLRRSP    V_SENDFLRRSP(1U)
+
+#define	S_IMMFLRRSP    24
+#define	V_IMMFLRRSP(x) ((x) << S_IMMFLRRSP)
+#define	F_IMMFLRRSP    V_IMMFLRRSP(1U)
+
+#define	S_TXNDISABLE    20
+#define	V_TXNDISABLE(x) ((x) << S_TXNDISABLE)
+#define	F_TXNDISABLE    V_TXNDISABLE(1U)
+
+#define	S_PNDTXNS    8
+#define	M_PNDTXNS    0x3ffU
+#define	V_PNDTXNS(x) ((x) << S_PNDTXNS)
+#define	G_PNDTXNS(x) (((x) >> S_PNDTXNS) & M_PNDTXNS)
+
+#define	S_VFVLD    3
+#define	V_VFVLD(x) ((x) << S_VFVLD)
+#define	F_VFVLD    V_VFVLD(1U)
+
+#define	S_PFNUM    0
+#define	M_PFNUM    0x7U
+#define	V_PFNUM(x) ((x) << S_PFNUM)
+#define	G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
+
+#define	A_PCIE_FID 0x3900
+
+#define	S_PAD    11
+#define	V_PAD(x) ((x) << S_PAD)
+#define	F_PAD    V_PAD(1U)
+
+#define	S_TC    8
+#define	M_TC    0x7U
+#define	V_TC(x) ((x) << S_TC)
+#define	G_TC(x) (((x) >> S_TC) & M_TC)
+
+#define	S_FUNC    0
+#define	M_FUNC    0xffU
+#define	V_FUNC(x) ((x) << S_FUNC)
+#define	G_FUNC(x) (((x) >> S_FUNC) & M_FUNC)
+
+#define	A_PCIE_CORE_UTL_SYSTEM_BUS_CONTROL 0x5900
+
+#define	S_SMTD    27
+#define	V_SMTD(x) ((x) << S_SMTD)
+#define	F_SMTD    V_SMTD(1U)
+
+#define	S_SSTD    26
+#define	V_SSTD(x) ((x) << S_SSTD)
+#define	F_SSTD    V_SSTD(1U)
+
+#define	S_SWD0    23
+#define	V_SWD0(x) ((x) << S_SWD0)
+#define	F_SWD0    V_SWD0(1U)
+
+#define	S_SWD1    22
+#define	V_SWD1(x) ((x) << S_SWD1)
+#define	F_SWD1    V_SWD1(1U)
+
+#define	S_SWD2    21
+#define	V_SWD2(x) ((x) << S_SWD2)
+#define	F_SWD2    V_SWD2(1U)
+
+#define	S_SWD3    20
+#define	V_SWD3(x) ((x) << S_SWD3)
+#define	F_SWD3    V_SWD3(1U)
+
+#define	S_SWD4    19
+#define	V_SWD4(x) ((x) << S_SWD4)
+#define	F_SWD4    V_SWD4(1U)
+
+#define	S_SWD5    18
+#define	V_SWD5(x) ((x) << S_SWD5)
+#define	F_SWD5    V_SWD5(1U)
+
+#define	S_SWD6    17
+#define	V_SWD6(x) ((x) << S_SWD6)
+#define	F_SWD6    V_SWD6(1U)
+
+#define	S_SWD7    16
+#define	V_SWD7(x) ((x) << S_SWD7)
+#define	F_SWD7    V_SWD7(1U)
+
+#define	S_SWD8    15
+#define	V_SWD8(x) ((x) << S_SWD8)
+#define	F_SWD8    V_SWD8(1U)
+
+#define	S_SRD0    13
+#define	V_SRD0(x) ((x) << S_SRD0)
+#define	F_SRD0    V_SRD0(1U)
+
+#define	S_SRD1    12
+#define	V_SRD1(x) ((x) << S_SRD1)
+#define	F_SRD1    V_SRD1(1U)
+
+#define	S_SRD2    11
+#define	V_SRD2(x) ((x) << S_SRD2)
+#define	F_SRD2    V_SRD2(1U)
+
+#define	S_SRD3    10
+#define	V_SRD3(x) ((x) << S_SRD3)
+#define	F_SRD3    V_SRD3(1U)
+
+#define	S_SRD4    9
+#define	V_SRD4(x) ((x) << S_SRD4)
+#define	F_SRD4    V_SRD4(1U)
+
+#define	S_SRD5    8
+#define	V_SRD5(x) ((x) << S_SRD5)
+#define	F_SRD5    V_SRD5(1U)
+
+#define	S_SRD6    7
+#define	V_SRD6(x) ((x) << S_SRD6)
+#define	F_SRD6    V_SRD6(1U)
+
+#define	S_SRD7    6
+#define	V_SRD7(x) ((x) << S_SRD7)
+#define	F_SRD7    V_SRD7(1U)
+
+#define	S_SRD8    5
+#define	V_SRD8(x) ((x) << S_SRD8)
+#define	F_SRD8    V_SRD8(1U)
+
+#define	S_CRRE    3
+#define	V_CRRE(x) ((x) << S_CRRE)
+#define	F_CRRE    V_CRRE(1U)
+
+#define	S_CRMC    0
+#define	M_CRMC    0x7U
+#define	V_CRMC(x) ((x) << S_CRMC)
+#define	G_CRMC(x) (((x) >> S_CRMC) & M_CRMC)
+
+#define	A_PCIE_CORE_UTL_STATUS 0x5904
+
+#define	S_USBP    31
+#define	V_USBP(x) ((x) << S_USBP)
+#define	F_USBP    V_USBP(1U)
+
+#define	S_UPEP    30
+#define	V_UPEP(x) ((x) << S_UPEP)
+#define	F_UPEP    V_UPEP(1U)
+
+#define	S_RCEP    29
+#define	V_RCEP(x) ((x) << S_RCEP)
+#define	F_RCEP    V_RCEP(1U)
+
+#define	S_EPEP    28
+#define	V_EPEP(x) ((x) << S_EPEP)
+#define	F_EPEP    V_EPEP(1U)
+
+#define	S_USBS    27
+#define	V_USBS(x) ((x) << S_USBS)
+#define	F_USBS    V_USBS(1U)
+
+#define	S_UPES    26
+#define	V_UPES(x) ((x) << S_UPES)
+#define	F_UPES    V_UPES(1U)
+
+#define	S_RCES    25
+#define	V_RCES(x) ((x) << S_RCES)
+#define	F_RCES    V_RCES(1U)
+
+#define	S_EPES    24
+#define	V_EPES(x) ((x) << S_EPES)
+#define	F_EPES    V_EPES(1U)
+
+#define	A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
+
+#define	S_RNPP    31
+#define	V_RNPP(x) ((x) << S_RNPP)
+#define	F_RNPP    V_RNPP(1U)
+
+#define	S_RPCP    29
+#define	V_RPCP(x) ((x) << S_RPCP)
+#define	F_RPCP    V_RPCP(1U)
+
+#define	S_RCIP    27
+#define	V_RCIP(x) ((x) << S_RCIP)
+#define	F_RCIP    V_RCIP(1U)
+
+#define	S_RCCP    26
+#define	V_RCCP(x) ((x) << S_RCCP)
+#define	F_RCCP    V_RCCP(1U)
+
+#define	S_RFTP    23
+#define	V_RFTP(x) ((x) << S_RFTP)
+#define	F_RFTP    V_RFTP(1U)
+
+#define	S_PTRP    20
+#define	V_PTRP(x) ((x) << S_PTRP)
+#define	F_PTRP    V_PTRP(1U)
+
+#define	A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_ERROR_SEVERITY 0x590c
+
+#define	S_RNPS    31
+#define	V_RNPS(x) ((x) << S_RNPS)
+#define	F_RNPS    V_RNPS(1U)
+
+#define	S_RPCS    29
+#define	V_RPCS(x) ((x) << S_RPCS)
+#define	F_RPCS    V_RPCS(1U)
+
+#define	S_RCIS    27
+#define	V_RCIS(x) ((x) << S_RCIS)
+#define	F_RCIS    V_RCIS(1U)
+
+#define	S_RCCS    26
+#define	V_RCCS(x) ((x) << S_RCCS)
+#define	F_RCCS    V_RCCS(1U)
+
+#define	S_RFTS    23
+#define	V_RFTS(x) ((x) << S_RFTS)
+#define	F_RFTS    V_RFTS(1U)
+
+#define	A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE 0x5910
+