changeset 11304:3092d1e303d6

FWARC 2008/613 KT IOS Performance Counters API FWARC 2008/615 KT Perf Reg HV API FWARC 2009/434 KT IOS Performance Counters API Update FWARC 2009/567 Parallel Boot HV APIs PSARC 2009/177 Solaris support for Rainbow Falls platforms PSARC 2009/389 Sun4v faulted SP events extension PSARC 2009/533 CRYPTO_HMAC_NO_UPDATE - A new KCF SPI flag PSARC 2009/605 more sun4v platform-independent cpu/mem FMA events PSARC 2009/649 Generic PCIe root complex FMA events 6704999 extend xaui enum to work in platform independent world 6773223 RFE: guest epkt for faulted SP 6773225 RFE: Diagnosis of a faulted SP 6797776 Solaris support for Rainbow Falls platforms
author Janie Lu <Janie.Lu@Sun.COM>
date Fri, 11 Dec 2009 10:41:17 -0800
parents 3c4e3958fa37
children 8d46318b62aa
files usr/src/cmd/fm/dicts/PCIEX.dict usr/src/cmd/fm/dicts/PCIEX.po usr/src/cmd/fm/dicts/SUN4V.dict usr/src/cmd/fm/dicts/SUN4V.po usr/src/cmd/fm/eversholt/files/Makefile.com usr/src/cmd/fm/eversholt/files/common/pciexrc.esc usr/src/cmd/fm/eversholt/files/i386/Makefile usr/src/cmd/fm/eversholt/files/sparc/Makefile usr/src/cmd/fm/eversholt/files/sparc/sun4v/Makefile usr/src/cmd/fm/eversholt/files/sparc/sun4v/gcpu.esc usr/src/cmd/fm/eversholt/files/sparc/sun4v/gmem.esc usr/src/cmd/fm/eversholt/files/sparc/sun4v/sp.esc usr/src/cmd/fm/modules/common/fabric-xlate/Makefile usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.c usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.conf usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.h usr/src/cmd/fm/modules/common/fabric-xlate/fx_epkt.c usr/src/cmd/fm/modules/common/fabric-xlate/fx_fabric.c usr/src/cmd/fm/modules/common/fabric-xlate/fx_fire.c usr/src/cmd/fm/modules/common/fabric-xlate/fx_subr.c usr/src/cmd/mdb/common/modules/crypto/impl.c usr/src/lib/brand/solaris10/s10_brand/common/s10_brand.c usr/src/lib/fm/topo/libtopo/common/hc.c usr/src/lib/fm/topo/libtopo/common/topo_hc.h usr/src/lib/fm/topo/modules/sun4v/xaui/Makefile usr/src/lib/fm/topo/modules/sun4v/xaui/xaui.c usr/src/lib/pkcs11/pkcs11_kernel/common/kernelDigest.c usr/src/lib/pkcs11/pkcs11_kernel/common/kernelEmulate.c usr/src/lib/pkcs11/pkcs11_kernel/common/kernelEmulate.h usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSession.c usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSign.c usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSlot.h usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSlottable.c usr/src/lib/pkcs11/pkcs11_kernel/common/kernelVerify.c usr/src/pkgdefs/SUNWfmd/prototype_com usr/src/pkgdefs/SUNWfmd/prototype_sparc usr/src/pkgdefs/SUNWiopc.v/postinstall usr/src/pkgdefs/SUNWiopc.v/preremove usr/src/pkgdefs/SUNWiopc.v/prototype_sparc usr/src/pkgdefs/SUNWn2cp.v/postinstall usr/src/pkgdefs/SUNWnxge.v/postinstall 
usr/src/pkgdefs/SUNWust2.v/prototype_com usr/src/uts/common/crypto/api/kcf_mac.c usr/src/uts/common/crypto/io/crypto.c usr/src/uts/common/crypto/io/dprov.c usr/src/uts/common/crypto/spi/kcf_spi.c usr/src/uts/common/io/nxge/npi/npi_fflp.c usr/src/uts/common/io/nxge/npi/npi_fflp.h usr/src/uts/common/io/nxge/npi/npi_rxdma.c usr/src/uts/common/io/nxge/npi/npi_rxdma.h usr/src/uts/common/io/nxge/nxge_fflp.c usr/src/uts/common/io/nxge/nxge_fzc.c usr/src/uts/common/io/nxge/nxge_hcall.s usr/src/uts/common/io/nxge/nxge_hio.c usr/src/uts/common/io/nxge/nxge_hv.c usr/src/uts/common/io/nxge/nxge_hw.c usr/src/uts/common/io/nxge/nxge_mac.c usr/src/uts/common/io/nxge/nxge_main.c usr/src/uts/common/io/nxge/nxge_rxdma.c usr/src/uts/common/io/nxge/nxge_virtual.c usr/src/uts/common/io/pciex/pcie.c usr/src/uts/common/sys/cpc_impl.h usr/src/uts/common/sys/crypto/common.h usr/src/uts/common/sys/crypto/impl.h usr/src/uts/common/sys/crypto/ioctl.h usr/src/uts/common/sys/crypto/spi.h usr/src/uts/common/sys/nxge/nxge.h usr/src/uts/common/sys/nxge/nxge_common.h usr/src/uts/common/sys/nxge/nxge_defs.h usr/src/uts/common/sys/nxge/nxge_fflp.h usr/src/uts/common/sys/nxge/nxge_fflp_hw.h usr/src/uts/common/sys/nxge/nxge_flow.h usr/src/uts/common/sys/nxge/nxge_hio.h usr/src/uts/common/sys/nxge/nxge_impl.h usr/src/uts/common/sys/nxge/nxge_n2_esr_hw.h usr/src/uts/common/sys/nxge/nxge_rxdma_hw.h usr/src/uts/sparc/os/driver_aliases usr/src/uts/sun4/io/trapstat.c usr/src/uts/sun4/sys/trapstat.h usr/src/uts/sun4v/Makefile.files usr/src/uts/sun4v/Makefile.rules usr/src/uts/sun4v/Makefile.sun4v.shared usr/src/uts/sun4v/cpu/niagara2.c usr/src/uts/sun4v/cpu/niagara2_asm.s usr/src/uts/sun4v/cpu/niagara_perfctr.c usr/src/uts/sun4v/io/ds_pri.c usr/src/uts/sun4v/io/ds_pri_hcall.s usr/src/uts/sun4v/io/iospc/iospc.c usr/src/uts/sun4v/io/iospc/iospc.conf usr/src/uts/sun4v/io/iospc/iospc.h usr/src/uts/sun4v/io/iospc/rfios_acc.h usr/src/uts/sun4v/io/iospc/rfios_asm.s usr/src/uts/sun4v/io/iospc/rfios_iospc.c 
usr/src/uts/sun4v/io/iospc/rfios_tables.c usr/src/uts/sun4v/io/iospc/rfios_tables.h usr/src/uts/sun4v/io/n2rng/n2rng.c usr/src/uts/sun4v/io/niumx/niumx.c usr/src/uts/sun4v/io/niumx/niumx_var.h usr/src/uts/sun4v/io/px/px_err.c usr/src/uts/sun4v/io/px/px_err.h usr/src/uts/sun4v/io/px/px_err_gen.c usr/src/uts/sun4v/io/vnet_dds.c usr/src/uts/sun4v/iospc/Makefile usr/src/uts/sun4v/kt/Makefile usr/src/uts/sun4v/kt_pcbe/Makefile usr/src/uts/sun4v/ml/hcall.s usr/src/uts/sun4v/os/error.c usr/src/uts/sun4v/os/hsvc.c usr/src/uts/sun4v/os/mach_cpu_states.c usr/src/uts/sun4v/pcbe/niagara2_pcbe.c usr/src/uts/sun4v/pcbe/niagara_pcbe.c usr/src/uts/sun4v/sys/error.h usr/src/uts/sun4v/sys/hsvc.h usr/src/uts/sun4v/sys/hypervisor_api.h usr/src/uts/sun4v/sys/machparam.h usr/src/uts/sun4v/sys/n2rng.h usr/src/uts/sun4v/sys/niagara2regs.h usr/src/uts/sun4v/sys/niagaraasi.h usr/src/uts/sun4v/sys/niagararegs.h
diffstat 119 files changed, 10550 insertions(+), 2751 deletions(-) [+]
line wrap: on
line diff
--- a/usr/src/cmd/fm/dicts/PCIEX.dict	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/dicts/PCIEX.dict	Fri Dec 11 10:41:17 2009 -0800
@@ -50,3 +50,7 @@
 fault.io.pciex.device-interr-deg=21
 fault.io.pciex.fw_corrupt=22
 fault.io.pciex.fw_mismatch=23
+fault.io.pciex.rc.generic-ce=24
+fault.io.pciex.rc.generic-ue=25
+fault.io.pciex.rc.generic-sw=26
+fault.io.pciex.rc.generic-fw=27
--- a/usr/src/cmd/fm/dicts/PCIEX.po	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/dicts/PCIEX.po	Fri Dec 11 10:41:17 2009 -0800
@@ -390,7 +390,7 @@
 msgid "PCIEX-8000-PY.impact"
 msgstr "Degraded services provided by the device instances associated with this fault.\n"
 msgid "PCIEX-8000-PY.action"
-msgstr "Use fmadm faulty to identify the device and then update the firmware to the latest version or schedule a repair procedure to replace the affected device. Contact Sun support for further information.\n"
+msgstr "Use 'fmadm faulty' to identify the device and then update the firmware to the latest version or schedule a repair procedure to replace the affected device. Contact Sun support for further information.\n"
 #
 # code: PCIEX-8000-Q3
 # keys: fault.io.pciex.fw_mismatch
@@ -406,4 +406,68 @@
 msgid "PCIEX-8000-Q3.impact"
 msgstr "Degraded services provided by the device instances associated with this fault.\n"
 msgid "PCIEX-8000-Q3.action"
-msgstr "Use fmadm faulty to identify the device and then update the firmware to the latest version. Contact Sun support for further information.\n"
+msgstr "Use 'fmadm faulty' to identify the device and then update the firmware to the latest version. Contact Sun support for further information.\n"
+#
+# code: PCIEX-8000-RC
+# keys: fault.io.pciex.rc.generic-ce
+#
+msgid "PCIEX-8000-RC.type"
+msgstr "Fault"
+msgid "PCIEX-8000-RC.severity"
+msgstr "Critical"
+msgid "PCIEX-8000-RC.description"
+msgstr "The number of correctable errors detected in the PCIe Root Complex has crossed the allowed threshold.\n"
+msgid "PCIEX-8000-RC.response"
+msgstr "One or more device instances may be disabled.\n"
+msgid "PCIEX-8000-RC.impact"
+msgstr "Loss of services provided by the device\ninstances associated with this fault.\n"
+msgid "PCIEX-8000-RC.action"
+msgstr "Schedule a repair procedure to replace the affected\ndevice if necessary, or contact Sun for support.\n"
+#
+# code: PCIEX-8000-SQ
+# keys: fault.io.pciex.rc.generic-ue
+#
+msgid "PCIEX-8000-SQ.type"
+msgstr "Fault"
+msgid "PCIEX-8000-SQ.severity"
+msgstr "Critical"
+msgid "PCIEX-8000-SQ.description"
+msgstr "An uncorrectable error was detected in the PCIe Root Complex.\n  Refer to %s for more information."
+msgid "PCIEX-8000-SQ.response"
+msgstr "One or more device instances may be disabled.\n"
+msgid "PCIEX-8000-SQ.impact"
+msgstr "Loss of services provided by the device\ninstances associated with this fault.\n"
+msgid "PCIEX-8000-SQ.action"
+msgstr "Schedule a repair procedure to replace the affected\ndevice if necessary, or contact Sun for support.\n"
+#
+# code: PCIEX-8000-T4
+# keys: fault.io.pciex.rc.generic-sw
+#
+msgid "PCIEX-8000-T4.type"
+msgstr "Fault"
+msgid "PCIEX-8000-T4.severity"
+msgstr "Critical"
+msgid "PCIEX-8000-T4.description"
+msgstr "A defect in the PCIe Root Complex or Root Port code has been detected.\n  Refer to %s for more information."
+msgid "PCIEX-8000-T4.response"
+msgstr "One or more device instances may be disabled.\n"
+msgid "PCIEX-8000-T4.impact"
+msgstr "Loss of services provided by the device\ninstances associated with this fault.\n"
+msgid "PCIEX-8000-T4.action"
+msgstr "Schedule a repair procedure to replace the affected\ndevice if necessary, or contact Sun for support.\n"
+#
+# code: PCIEX-8000-UR
+# keys: fault.io.pciex.rc.generic-fw
+#
+msgid "PCIEX-8000-UR.type"
+msgstr "Fault"
+msgid "PCIEX-8000-UR.severity"
+msgstr "Critical"
+msgid "PCIEX-8000-UR.description"
+msgstr "Defective firmware associated with the PCIe Root Complex has been detected.\n  Refer to %s for more information."
+msgid "PCIEX-8000-UR.response"
+msgstr "One or more device instances may be disabled.\n"
+msgid "PCIEX-8000-UR.impact"
+msgstr "Loss of services provided by the device\ninstances associated with this fault.\n"
+msgid "PCIEX-8000-UR.action"
+msgstr "Schedule a repair procedure to replace the affected\ndevice if necessary, or contact Sun for support.\n"
--- a/usr/src/cmd/fm/dicts/SUN4V.dict	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/dicts/SUN4V.dict	Fri Dec 11 10:41:17 2009 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 # CDDL HEADER START
@@ -116,3 +116,4 @@
 defect.fw.generic-sparc.addr-oob=88
 defect.fw.generic-sparc.erpt-gen=89
 fault.cpu.generic-sparc.bootbus=90
+fault.sp.failed=91
--- a/usr/src/cmd/fm/dicts/SUN4V.po	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/dicts/SUN4V.po	Fri Dec 11 10:41:17 2009 -0800
@@ -1463,3 +1463,19 @@
 msgstr "The system's integrity is seriously compromised.  Processor chip(s)\nmay be unavailable.\n"
 msgid "SUN4V-8002-T5.action"
 msgstr "Schedule a repair procedure to replace the affected resource, the identity of which can be determined using 'fmadm faulty'.\n"
+#
+# code: SUN4V-8002-US
+# keys: fault.sp.failed
+#
+msgid "SUN4V-8002-US.type"
+msgstr "Fault"
+msgid "SUN4V-8002-US.severity"
+msgstr "Critical"
+msgid "SUN4V-8002-US.description"
+msgstr "The Service Processor failed.\n  Refer to %s for more information."
+msgid "SUN4V-8002-US.response"
+msgstr "No automated response.\n"
+msgid "SUN4V-8002-US.impact"
+msgstr "Some services such as Fault Diagnosis may be degraded as a result.\n"
+msgid "SUN4V-8002-US.action"
+msgstr "Schedule a repair procedure for the Service Processor, or contact Sun for support.\n"
--- a/usr/src/cmd/fm/eversholt/files/Makefile.com	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/eversholt/files/Makefile.com	Fri Dec 11 10:41:17 2009 -0800
@@ -19,10 +19,9 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-#ident	"%Z%%M%	%I%	%E% SMI"
 
 .SUFFIXES: .eft .esc
 
@@ -54,8 +53,11 @@
 	$(RM) $(EFT_PLAT_FILES) $(EFT_COMMON_FILES) \
 	$(USR_PLAT_EFT_FILES) $(ROOT_COMMON_EFT_FILES)
 
+ESCFLAGS= -D_ESC -I$(ROOT)/usr/include
+pciexrc.eft := ESCFLAGS += -I$(SRC)/uts/sun4v/io/px
+
 %.eft: ../common/%.esc
-	$(ESC) -I$(ROOT)/usr/include  -o $@ $<
+	$(ESC) $(ESCFLAGS) -o $@ $<
 
 %.eft: %.esc
-	$(ESC) -o $@ $<
+	$(ESC) $(ESCFLAGS) -o $@ $<
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/fm/eversholt/files/common/pciexrc.esc	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,218 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma dictionary "PCIEX"
+
+#include <px_err.h>
+
+/*
+ * generic root complex/root port diagnosis rules
+ */
+
+#define	PCIEXFN		pciexbus/pciexdev/pciexfn
+#define	PCIEXFNHZ	pciexbus<>/pciexdev<>/pciexfn<>
+
+#define RC_N    5
+#define RC_T    72h
+
+#define SW_FIT    5000
+#define FW_FIT    5000
+#define HB_FIT    400
+
+#define EPKT_DESC       (payloadprop("desc") >> 12)
+#define EPKT_B_BIT      (payloadprop("desc") & (1 << 7))
+#define EPKT_C_BIT      (payloadprop("desc") & (1 << 5))
+#define EPKT_H_BIT      (payloadprop("desc") & (1 << 4))
+
+#define MATCHES_DESC(b, o, p, c, d) \
+    (EPKT_DESC == (b << 16 | o << 12 | p << 8 | c << 4 | d))
+
+#define IS_CE (EPKT_C_BIT != 0 && setserdsuffix(EPKT_DESC))
+#define IS_UE (EPKT_C_BIT == 0)
+#define IS_BLOCKED (EPKT_B_BIT != 0)
+
+#define EPKT(b, o, p, c, d) \
+    ereport.io.pciex.rc.epkt@hostbridge { MATCHES_DESC(b, o, p, c, d) }
+
+/* Ereport Events */
+event ereport.io.pciex.rc.epkt@hostbridge {within(5s)};
+
+/* Internal Events */
+event error.io.pciex.rc.stall@hostbridge;
+event error.io.pciex.rc.poiscomp@hostbridge;
+event error.io.pciex.nr-d@hostbridge/pciexrc/PCIEXFN;
+event error.io.pciex.badreq-u@hostbridge/pciexrc/PCIEXFN;
+event error.io.pciex.poiscomp-d@hostbridge/pciexrc/PCIEXFN;
+event error.io.pciex.noimpact-d@hostbridge/pciexrc/PCIEXFN;
+event error.io.pciex.lost-d@hostbridge/pciexrc/PCIEXFN;
+event error.io.pciex.degraded-d@hostbridge/pciexrc/PCIEXFN;
+
+/* Upset event */
+event upset.io.pciex.rc.discard@hostbridge;
+
+/*
+ * Fault Events
+ * Do no retire and FRUs for SW/FW faults
+ */
+event fault.io.pciex.rc.generic-ue@hostbridge,
+    FITrate=HB_FIT, retire=0, response=0;
+event fault.io.pciex.rc.generic-sw@hostbridge,
+    FITrate=SW_FIT, retire=0, response=0;
+event fault.io.pciex.rc.generic-fw@hostbridge,
+    FITrate=FW_FIT, retire=0, response=0;
+
+/* Serd engine for CE errors */
+engine serd.io.pciex.rc.generic-ce@hostbridge, N=RC_N, T=RC_T;
+event fault.io.pciex.rc.generic-ce@hostbridge, FITrate=HB_FIT,
+    engine=serd.io.pciex.rc.generic-ce@hostbridge;
+
+/* Fire faults */
+event fault.io.fire.pciex.device@PCIEXFN, FITrate=1000;
+event fault.io.fire.pci.device@pcibus/pcidev/pcifn, FITrate=1000;
+
+/* Generic Root Complex Software faults */
+prop fault.io.pciex.rc.generic-sw@hostbridge ->
+    ereport.io.pciex.rc.epkt@hostbridge {
+	MATCHES_DESC(BLOCK_INTR,OP_FIXED,PH_UNKNOWN,CND_ILL,DIR_INGRESS)  ||
+	MATCHES_DESC(BLOCK_INTR,OP_MSI32,PH_UNKNOWN,CND_ILL,DIR_IRR)  ||
+	MATCHES_DESC(BLOCK_INTR,OP_PCIEMSG,PH_UNKNOWN,CND_ILL,DIR_INGRESS)
+    };
+
+/* Generic Root Complex Firmware faults */
+prop fault.io.pciex.rc.generic-fw@hostbridge ->
+    ereport.io.pciex.rc.epkt@hostbridge {
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_PIO,PH_ADDR,CND_UNMAP,DIR_WRITE)
+    };
+
+/* Generic Root Complex CE faults */
+prop fault.io.pciex.rc.generic-ce@hostbridge { IS_CE } -> 
+    ereport.io.pciex.rc.epkt@hostbridge;
+
+/* Generic Root Complex UE faults from propagations */
+event error.io.pciex.rc.generic-ue1@hostbridge;
+event error.io.pciex.rc.generic-ue2@hostbridge;
+
+prop fault.io.pciex.rc.generic-ue@hostbridge -> 
+    error.io.pciex.rc.generic-ue1@hostbridge,
+    error.io.pciex.rc.generic-ue2@hostbridge,
+    error.io.pciex.rc.stall@hostbridge,
+    error.io.pciex.rc.poiscomp@hostbridge;
+
+/* Generic Root Complex UE propagations */
+prop error.io.pciex.rc.generic-ue1@hostbridge { IS_UE && !IS_BLOCKED } ->
+    ereport.io.pciex.rc.epkt@hostbridge {
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_DMA,PH_DATA,CND_INT,DIR_READ) ||
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_DMA,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_DMA,PH_DATA,CND_INT,DIR_WRITE) ||
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_DMA,PH_DATA,CND_TO,DIR_READ) ||
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_DMA,PH_DATA,CND_TO,DIR_WRITE) ||
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_PIO,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_UNKNOWN,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_UNKNOWN,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_INTR,OP_MSI32,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_INTR,OP_MSIQ,PH_DATA,CND_INT,DIR_UNKNOWN)
+    };
+
+prop error.io.pciex.rc.generic-ue2@hostbridge { IS_UE && !IS_BLOCKED } ->
+    ereport.io.pciex.rc.epkt@hostbridge {
+	MATCHES_DESC(BLOCK_MMU,OP_TBW,PH_ADDR,CND_UNKNOWN,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_MMU,OP_TBW,PH_ADDR,CND_UNMAP,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_MMU,OP_TBW,PH_DATA,CND_INT,DIR_IRR) ||
+	MATCHES_DESC(BLOCK_MMU,OP_TBW,PH_UNKNOWN,CND_UNKNOWN,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_MMU,OP_XLAT,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_PORT,OP_DMA,PH_DATA,CND_INT,DIR_READ) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_DATA,CND_INT,DIR_READ) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_PORT,OP_UNKNOWN,PH_DATA,CND_INT,DIR_UNKNOWN) ||
+	MATCHES_DESC(BLOCK_PORT,OP_UNKNOWN,PH_DATA,CND_INT,DIR_UNKNOWN)
+    };
+
+/* Errors that will cause a pipe stall and thus a CTO in the fabric */
+prop error.io.pciex.rc.stall@hostbridge (0) -> 
+    error.io.pciex.nr-d@hostbridge/pciexrc<>/PCIEXFNHZ;
+
+prop error.io.pciex.rc.stall@hostbridge { IS_UE && IS_BLOCKED } -> 
+    ereport.io.pciex.rc.epkt@hostbridge;
+
+/*
+ * Errors that will send a poisoned data to the fabric
+ * Also the poiscomp-d could represent a fault that a hardened driver
+ * handled and reported a service impact.
+ */
+prop error.io.pciex.rc.poiscomp@hostbridge (0) ->
+    error.io.pciex.poiscomp-d@hostbridge/pciexrc<>/PCIEXFNHZ,
+    error.io.pciex.noimpact-d@hostbridge/pciexrc<>/PCIEXFNHZ,
+    error.io.pciex.lost-d@hostbridge/pciexrc<>/PCIEXFNHZ,
+    error.io.pciex.degraded-d@hostbridge/pciexrc<>/PCIEXFNHZ;
+
+prop error.io.pciex.rc.poiscomp@hostbridge { IS_UE && !IS_BLOCKED } -> 
+    ereport.io.pciex.rc.epkt@hostbridge {
+	MATCHES_DESC(BLOCK_HOSTBUS,OP_DMA,PH_DATA,CND_INT,DIR_READ)
+    };
+
+prop error.io.pciex.badreq-u@hostbridge/pciexrc/PCIEXFN { IS_UE && !IS_BLOCKED } (0) -> 
+    ereport.io.pciex.rc.epkt@hostbridge {
+	MATCHES_DESC(BLOCK_MMU,OP_XLAT,PH_ADDR,CND_UNMAP,DIR_RDWR) ||
+	MATCHES_DESC(BLOCK_MMU,OP_XLAT,PH_DATA,CND_INV,DIR_RDWR) ||
+	MATCHES_DESC(BLOCK_MMU,OP_XLAT,PH_DATA,CND_PROT,DIR_RDWR)
+    };
+
+prop upset.io.pciex.rc.discard@hostbridge -> 
+    ereport.io.pciex.rc.epkt@hostbridge {
+	MATCHES_DESC(BLOCK_INTR,OP_MSI32,PH_DATA,CND_ILL,DIR_IRR) ||
+	MATCHES_DESC(BLOCK_PORT,OP_LINK,PH_FC,CND_TO,DIR_IRR) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_IRR,CND_INV,DIR_RDWR) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_IRR,CND_RCA,DIR_WRITE) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_IRR,CND_RUR,DIR_WRITE) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_IRR,CND_TO,DIR_READ) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_IRR,CND_TO,DIR_WRITE) ||
+	MATCHES_DESC(BLOCK_PORT,OP_PIO,PH_IRR,CND_UC,DIR_IRR)
+    };
+
+/* Event queue overflow */
+#define PROP_PLAT_FRU "FRU"
+#define GET_HB_FRU (confprop(asru(hostbridge), PROP_PLAT_FRU))
+#define GET_PCIE_FRU (confprop(asru(pciexbus[b]/pciexdev[d]/pciexfn[0]), PROP_PLAT_FRU))
+#define GET_PCI_FRU (confprop(asru(pcibus[b]/pcidev[d]/pcifn[0]), PROP_PLAT_FRU))
+
+prop fault.io.fire.pciex.device@pciexbus[b]/pciexdev[d]/pciexfn[0]
+    {  
+        /*
+         * Indict PCI-E FRU(s) under this root complex excluding the 
+         * one that the Fire ASIC resides on. 
+         */
+        is_under(hostbridge, pciexbus[b]/pciexdev[d]/pciexfn[0]) &&
+	(GET_HB_FRU != GET_PCIE_FRU)
+    } (0) -> EPKT(BLOCK_INTR,OP_MSIQ,PH_UNKNOWN,CND_OV,DIR_IRR);
+
+prop fault.io.fire.pci.device@pcibus[b]/pcidev[d]/pcifn[0]
+    {  
+        /*
+         * Indict PCI FRU(s) under this root complex excluding the 
+         * one that the Fire ASIC resides on. 
+         */
+        is_under(hostbridge, pcibus[b]/pcidev[d]/pcifn[0]) &&
+	    (GET_HB_FRU != GET_PCI_FRU)
+    } (0) -> EPKT(BLOCK_INTR,OP_MSIQ,PH_UNKNOWN,CND_OV,DIR_IRR);
--- a/usr/src/cmd/fm/eversholt/files/i386/Makefile	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/eversholt/files/i386/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -30,6 +30,7 @@
 	neptune_xfp.eft 	\
 	pci.eft			\
 	pciex.eft		\
+	pciexrc.eft		\
 	sca500.eft		\
 	sca1000.eft 		\
 	sensor.eft
--- a/usr/src/cmd/fm/eversholt/files/sparc/Makefile	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/eversholt/files/sparc/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -30,6 +30,7 @@
 	neptune_xfp.eft		\
 	pci.eft			\
 	pciex.eft		\
+	pciexrc.eft		\
 	sca500.eft		\
 	sca1000.eft 		\
 	sensor.eft
--- a/usr/src/cmd/fm/eversholt/files/sparc/sun4v/Makefile	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/eversholt/files/sparc/sun4v/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 #
@@ -28,7 +28,7 @@
 
 EFT_PLAT= sun4v
 SUN4VEFTFILES= n2piu.eft vfncx.eft n2niu_xaui.eft n2niu_xfp.eft zambezi.eft \
-gcpu.eft gmem.eft
+gcpu.eft gmem.eft sp.eft
 EFT_PLAT_FILES= $(SUN4VEFTFILES) $(SUN4EFTFILES)
 
 include ../../Makefile.com
--- a/usr/src/cmd/fm/eversholt/files/sparc/sun4v/gcpu.esc	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/eversholt/files/sparc/sun4v/gcpu.esc	Fri Dec 11 10:41:17 2009 -0800
@@ -437,10 +437,11 @@
     { !DIAGNOSE_ERPT } (0) ->
     ereport.cpu.generic-sparc.inconsistent@chassis;
 /*
- * bootbus-to and bootbus-par errors. Fault the detector.
+ * bootbus-prot, bootbus-to and bootbus-par errors. Fault the detector.
  */
 event ereport.cpu.generic-sparc.bootbus-to@chip;
 event ereport.cpu.generic-sparc.bootbus-par@chip;
+event ereport.cpu.generic-sparc.bootbus-prot@chip;
 event upset.cpu.generic-sparc.bootbus@chip;
 
 event fault.cpu.generic-sparc.bootbus@chip, retire=0;
@@ -453,10 +454,15 @@
     { DIAGNOSE_ERPT } (0) ->
     ereport.cpu.generic-sparc.bootbus-par@chip;
 
+prop fault.cpu.generic-sparc.bootbus@chip
+    { DIAGNOSE_ERPT } (0) ->
+    ereport.cpu.generic-sparc.bootbus-prot@chip;
+
 prop upset.cpu.generic-sparc.bootbus@chip
     { !DIAGNOSE_ERPT } (0) ->
     ereport.cpu.generic-sparc.bootbus-to@chip,
-    ereport.cpu.generic-sparc.bootbus-par@chip;
+    ereport.cpu.generic-sparc.bootbus-par@chip,
+    ereport.cpu.generic-sparc.bootbus-prot@chip;
 
 /*
  * ignore the pio-read error.
--- a/usr/src/cmd/fm/eversholt/files/sparc/sun4v/gmem.esc	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/eversholt/files/sparc/sun4v/gmem.esc	Fri Dec 11 10:41:17 2009 -0800
@@ -184,7 +184,7 @@
     ereport.cpu.generic-sparc.membuf-crc@MEM_CTRL;
 
 /*
- * membuf-crc-uc will fault the detector FRU and sender FRU
+ * membuf-crc-uc, membuf-other-uc will fault the detector FRU and sender FRU
  * if detector is CHIP or MEM_CTRL, the sender is MEM_BUFF.
  * if detector is MEM_BUFF, the sender is CHIP or MEM_CTRL
  */
@@ -192,6 +192,10 @@
 event ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF { within(1s) };
 event ereport.cpu.generic-sparc.membuf-crc-uc@MEM_CTRL { within(1s) };
 
+event ereport.cpu.generic-sparc.membuf-other-uc@CHIP { within(1s) };
+event ereport.cpu.generic-sparc.membuf-other-uc@MEM_BUFF { within(1s) };
+event ereport.cpu.generic-sparc.membuf-other-uc@MEM_CTRL { within(1s) };
+
 event fault.memory.memlink-uc@CHIP;
 event fault.memory.memlink-uc@MEM_BUFF;
 event fault.memory.memlink-uc@MEM_CTRL;
@@ -201,55 +205,66 @@
  */
 prop fault.memory.memlink-uc@CHIP
     { DIAGNOSE_ERPT } (0) ->
-    ereport.cpu.generic-sparc.membuf-crc-uc@CHIP;
+    ereport.cpu.generic-sparc.membuf-crc-uc@CHIP,
+    ereport.cpu.generic-sparc.membuf-other-uc@CHIP;
 
 prop fault.memory.memlink-uc@MEM_BUFF
     { DIAGNOSE_ERPT && CONTAINS_MEMBUFF } (0) ->
-    ereport.cpu.generic-sparc.membuf-crc-uc@CHIP<>;
+    ereport.cpu.generic-sparc.membuf-crc-uc@CHIP<>,
+    ereport.cpu.generic-sparc.membuf-other-uc@CHIP<>;
 
 event upset.memory.memlink-uc@CHIP;
 
 prop upset.memory.memlink-uc@CHIP
     { !DIAGNOSE_ERPT } (0)->
-    ereport.cpu.generic-sparc.membuf-crc-uc@CHIP;
+    ereport.cpu.generic-sparc.membuf-crc-uc@CHIP,
+    ereport.cpu.generic-sparc.membuf-other-uc@CHIP;
 
 /*
  * memory-buffer is detector
  */
 prop fault.memory.memlink-uc@MEM_BUFF
     { DIAGNOSE_ERPT } (0) ->
-    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF;
+    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF,
+    ereport.cpu.generic-sparc.membuf-other-uc@MEM_BUFF;
 
 prop fault.memory.memlink-uc@CHIP
     { DIAGNOSE_ERPT && CONTAINS_CHIP } (0) ->
-    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF<>;
+    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF<>,
+    ereport.cpu.generic-sparc.membuf-other-uc@MEM_BUFF<>;
 
 prop fault.memory.memlink-uc@MEM_CTRL
     { DIAGNOSE_ERPT && CONTAINS_MEMCTRL } (0) ->
-    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF<>;
+    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF<>,
+    ereport.cpu.generic-sparc.membuf-other-uc@MEM_BUFF<>;
 
 event upset.memory.memlink-uc@MEM_BUFF;
 
 prop upset.memory.memlink-uc@MEM_BUFF
     { !DIAGNOSE_ERPT } (0)->
-    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF;
+    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_BUFF,
+    ereport.cpu.generic-sparc.membuf-other-uc@MEM_BUFF;
 
 /*
  * memory-controller is detector
  */
 prop fault.memory.memlink-uc@MEM_CTRL
     { DIAGNOSE_ERPT } (0) ->
-    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_CTRL;
+    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_CTRL,
+    ereport.cpu.generic-sparc.membuf-other-uc@MEM_CTRL;
 
 prop fault.memory.memlink-uc@MEM_BUFF
     { DIAGNOSE_ERPT && CONTAINS_MEMBUFF } (0) ->
-    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_CTRL<>;
+    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_CTRL<>,
+    ereport.cpu.generic-sparc.membuf-other-uc@MEM_CTRL<>;
 
 event upset.memory.memlink-uc@MEM_CTRL;
 
 prop upset.memory.memlink-uc@MEM_CTRL
     { !DIAGNOSE_ERPT } (0)->
-    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_CTRL;
+    ereport.cpu.generic-sparc.membuf-crc-uc@MEM_CTRL,
+    ereport.cpu.generic-sparc.membuf-other-uc@MEM_CTRL;
+
 /*
  * membuf-crc-failover will fault the detector FRU and sender FRU
  * if detector is chip or memory-controller, the sender is memory-buffer.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/fm/eversholt/files/sparc/sun4v/sp.esc	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,37 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma	dictionary "SUN4V"
+
+/*
+ * Eversholt rule for a faulted Service Processor
+ */
+
+event fault.sp.failed@sp;
+
+event ereport.chassis.sp.unavailable@chassis;
+
+prop fault.sp.failed@sp ->
+	ereport.chassis.sp.unavailable@chassis;
--- a/usr/src/cmd/fm/modules/common/fabric-xlate/Makefile	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -19,19 +19,18 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-# ident	"%Z%%M%	%I%	%E% SMI"
-#
 
 MODULE = fabric-xlate
 CLASS = common
-SRCS = fabric-xlate.c
+SRCS = fabric-xlate.c fx_epkt.c fx_fabric.c fx_fire.c fx_subr.c
 
 include ../../Makefile.plugin
 
 CPPFLAGS += -I/usr/include/libxml2 -I$(KMFDIR)/include -I.
+CPPFLAGS += -I$(SRC)/uts/sun4v/io/px
 INCDIRS = $(SRC)/uts/common
 CFLAGS += -I$(INCDIRS)
 LINTFLAGS += -I$(INCDIRS)
--- a/usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.c	Fri Dec 11 10:41:17 2009 -0800
@@ -23,1500 +23,22 @@
  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
-#include <assert.h>
-#include <stddef.h>
-#include <errno.h>
-#include <strings.h>
-#include <fm/fmd_api.h>
 #include <fm/libtopo.h>
 #include <sys/fm/util.h>
-#include <sys/fm/protocol.h>
-#include <sys/fm/io/pci.h>
-#include <sys/fm/io/sun4_fire.h>
-#include <sys/pci.h>
-#include <sys/pcie.h>
-#include <sys/nvpair.h>
-#include <sys/nvpair_impl.h>
 
-#include <libxml/xpath.h>
-#include <libxml/parser.h>
 #include <libxml/xpathInternals.h>
-#include <libxml/tree.h>
-
-/* PCI-E config space data for error handling and fabric ereports */
-typedef struct fab_data {
-	/* Original ereport NVL */
-	nvlist_t	*nvl;
-
-	/* Device Information */
-	uint16_t bdf;
-	uint16_t device_id;
-	uint16_t vendor_id;
-	uint8_t rev_id;
-	uint16_t dev_type;
-	uint16_t pcie_off;
-	uint16_t pcix_off;
-	uint16_t aer_off;
-	uint16_t ecc_ver;
-
-	/* Ereport Information */
-	uint32_t remainder;
-	uint32_t severity;
-
-	/* Error Registers */
-	uint16_t pci_err_status;	/* pci status register */
-	uint16_t pci_cfg_comm;		/* pci command register */
-
-	uint16_t pci_bdg_sec_stat;	/* PCI secondary status reg */
-	uint16_t pci_bdg_ctrl;		/* PCI bridge control reg */
-
-	uint16_t pcix_command;		/* pcix command register */
-	uint32_t pcix_status;		/* pcix status register */
-
-	uint16_t pcix_bdg_sec_stat;	/* pcix bridge secondary status reg */
-	uint32_t pcix_bdg_stat;		/* pcix bridge status reg */
-
-	uint16_t pcix_ecc_control_0;	/* pcix ecc control status reg */
-	uint16_t pcix_ecc_status_0;	/* pcix ecc control status reg */
-	uint32_t pcix_ecc_fst_addr_0;	/* pcix ecc first address reg */
-	uint32_t pcix_ecc_sec_addr_0;	/* pcix ecc second address reg */
-	uint32_t pcix_ecc_attr_0;	/* pcix ecc attributes reg */
-	uint16_t pcix_ecc_control_1;	/* pcix ecc control status reg */
-	uint16_t pcix_ecc_status_1;	/* pcix ecc control status reg */
-	uint32_t pcix_ecc_fst_addr_1;	/* pcix ecc first address reg */
-	uint32_t pcix_ecc_sec_addr_1;	/* pcix ecc second address reg */
-	uint32_t pcix_ecc_attr_1;	/* pcix ecc attributes reg */
-
-	uint16_t pcie_err_status;	/* pcie device status register */
-	uint16_t pcie_err_ctl;		/* pcie error control register */
-	uint32_t pcie_dev_cap;		/* pcie device capabilities register */
 
-	uint32_t pcie_adv_ctl;		/* pcie advanced control reg */
-	uint32_t pcie_ue_status;	/* pcie ue error status reg */
-	uint32_t pcie_ue_mask;		/* pcie ue error mask reg */
-	uint32_t pcie_ue_sev;		/* pcie ue error severity reg */
-	uint32_t pcie_ue_hdr[4];	/* pcie ue header log */
-	uint32_t pcie_ce_status;	/* pcie ce error status reg */
-	uint32_t pcie_ce_mask;		/* pcie ce error mask reg */
-	uint32_t pcie_ue_tgt_trans;	/* Fault trans type from AER Logs */
-	uint64_t pcie_ue_tgt_addr;	/* Fault addr from AER Logs */
-	pcie_req_id_t pcie_ue_tgt_bdf;	/* Fault bdf from SAER Logs */
-
-	uint32_t pcie_sue_ctl;		/* pcie bridge secondary ue control */
-	uint32_t pcie_sue_status;	/* pcie bridge secondary ue status */
-	uint32_t pcie_sue_mask;		/* pcie bridge secondary ue mask */
-	uint32_t pcie_sue_sev;		/* pcie bridge secondary ue severity */
-	uint32_t pcie_sue_hdr[4];	/* pcie bridge secondary ue hdr log */
-	uint32_t pcie_sue_tgt_trans;	/* Fault trans type from AER Logs */
-	uint64_t pcie_sue_tgt_addr;	/* Fault addr from AER Logs */
-	pcie_req_id_t pcie_sue_tgt_bdf;	/* Fault bdf from SAER Logs */
-
-	uint32_t pcie_rp_status;	/* root complex status register */
-	uint16_t pcie_rp_ctl;		/* root complex control register */
-	uint32_t pcie_rp_err_status;	/* pcie root complex error status reg */
-	uint32_t pcie_rp_err_cmd;	/* pcie root complex error cmd reg */
-	uint16_t pcie_rp_ce_src_id;	/* pcie root complex ce sourpe id */
-	uint16_t pcie_rp_ue_src_id;	/* pcie root complex ue sourpe id */
-} fab_data_t;
+#include "fabric-xlate.h"
 
-/*
- * These values are used for the xxx_tgt_trans value in fab_data_t.  They are
- * originally set in pcie_fault.c and originally defined in pcie_impl.h.
- */
-#define	PF_ADDR_DMA		(1 << 0)
-#define	PF_ADDR_PIO		(1 << 1)
-#define	PF_ADDR_CFG		(1 << 2)
-
-typedef struct fab_erpt_tbl {
-	const char	*err_class;	/* Final Ereport Class */
-	uint32_t	reg_bit;	/* Error Bit Mask */
-	/* Pointer to function that prepares the ereport body */
-	const char	*tgt_class;	/* Target Ereport Class */
-} fab_erpt_tbl_t;
-
-typedef struct fab_err_tbl {
-	fab_erpt_tbl_t	*erpt_tbl;	/* ereport table */
-	uint32_t	reg_offset;	/* sts reg for ereport table offset */
-	uint32_t	reg_size;	/* size of the status register */
-	/* Pointer to function that prepares the ereport body */
-	int		(*fab_prep)(fmd_hdl_t *, fab_data_t *, nvlist_t *,
-	    fab_erpt_tbl_t *);
-} fab_err_tbl_t;
-
-typedef struct fab_fire_tbl {
-	const char	*err_class;
-	uint32_t	fire_bit;	/* Fire error bit */
-	uint16_t	pci_err_sts;	/* Equivalent PCI Error Status */
-	uint16_t	pci_bdg_sts;	/* Equivalent PCI Bridge Status */
-} fab_fire_tbl_t;
-
-/* Static FM Topo XML Format and XML XPath Context  */
-static xmlDocPtr		fab_doc = NULL;
-static xmlXPathContextPtr	fab_xpathCtx = NULL;
-static int			fab_valid_topo = 0;
 #define	XMLTOPOFILE "/tmp/fab-xlate-topo.xml"
 
-/* Functions that convert ereports to a common C data structure */
-static void fab_pci_fabric_to_data(fmd_hdl_t *hdl, nvlist_t *nvl,
-    fab_data_t *data);
-static void fab_fire_to_data(fmd_hdl_t *hdl, nvlist_t *nvl, fab_data_t *data);
-
-/* Common functions for sending translated ereports */
-static int fab_prep_basic_erpt(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *erpt,
-    boolean_t isRC);
-static boolean_t fab_get_rcpath(fmd_hdl_t *hdl, nvlist_t *nvl, char *rcpath);
-static char *fab_find_addr(fmd_hdl_t *hdl, nvlist_t *nvl, uint64_t addr);
-static char *fab_find_bdf(fmd_hdl_t *hdl, nvlist_t *nvl, pcie_req_id_t bdf);
-static void fab_send_tgt_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    const char *class, boolean_t isPrimary);
-static void fab_send_erpt(fmd_hdl_t *hdl, fab_data_t *data, fab_err_tbl_t *tbl);
-
-/*
- * Common functions for converting  pci.fabric classes of
- * ereports
- */
-static int fab_prep_pci_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pci_bdg_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pci_bdg_ctl_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pcie_ce_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pcie_ue_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pcie_sue_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pcie_nadv_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pcix_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static void fab_send_pcix_ecc_erpt(fmd_hdl_t *hdl, fab_data_t *data);
-static int fab_prep_pcix_bdg_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static void fab_send_pcix_bdg_ecc_erpt(fmd_hdl_t *hdl, fab_data_t *data);
-static int fab_prep_pcie_rc_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-static int fab_prep_pcie_fake_rc_erpt(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *erpt, fab_erpt_tbl_t *table);
-
-/* Functions for converting fire specific error registers */
-static int fab_xlate_fire_ce(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class);
-static int fab_xlate_fire_ue(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class);
-static int fab_xlate_fire_oe(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class);
-static int fab_xlate_fire_dmc(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class);
-
-/* Main functions for converting "fabric" ereports */
-static void fab_xlate_pcie_erpts(fmd_hdl_t *hdl, fab_data_t *data);
-static void fab_xlate_fire_erpts(fmd_hdl_t *hdl, fab_data_t *data,
-    nvlist_t *nvl, const char *class);
-
-/*
- * Translation tables for converting "fabric" error bits into "pci" ereports.
- * <Ereport Class Name>, <Error Bit Mask>, <Preparation Function>
- */
-
-/* MACRO for table entries with no TGT ereports */
-#define	NT(class, bit, prep) class, bit, prep, NULL
-/* Translate Fabric ereports to ereport.io.pci.* */
-static fab_erpt_tbl_t fab_pci_erpt_tbl[] = {
-	PCI_DET_PERR,		PCI_STAT_PERROR,	NULL,
-	PCI_MDPE,		PCI_STAT_S_PERROR,	NULL,
-	PCI_SIG_SERR,		PCI_STAT_S_SYSERR,	NULL,
-	PCI_MA,			PCI_STAT_R_MAST_AB,	NULL,
-	PCI_REC_TA,		PCI_STAT_R_TARG_AB,	NULL,
-	PCI_SIG_TA,		PCI_STAT_S_TARG_AB,	NULL,
-	NULL, NULL, NULL
-};
-
-/* Translate Fabric ereports to ereport.io.pci.sec-* */
-static fab_erpt_tbl_t fab_pci_bdg_erpt_tbl[] = {
-	PCI_DET_PERR,		PCI_STAT_PERROR,	NULL,
-	PCI_MDPE,		PCI_STAT_S_PERROR,	NULL,
-	PCI_REC_SERR,		PCI_STAT_S_SYSERR,	NULL,
-#ifdef sparc
-	PCI_MA,			PCI_STAT_R_MAST_AB,	NULL,
-#endif
-	PCI_REC_TA,		PCI_STAT_R_TARG_AB,	NULL,
-	PCI_SIG_TA,		PCI_STAT_S_TARG_AB,	NULL,
-	NULL, NULL, NULL, NULL,
-};
-
-
-/* Translate Fabric ereports to ereport.io.pci.dto */
-static fab_erpt_tbl_t fab_pci_bdg_ctl_erpt_tbl[] = {
-	PCI_DTO,	PCI_BCNF_BCNTRL_DTO_STAT,	NULL,
-	NULL, NULL, NULL
-};
-
-/* Translate Fabric ereports to ereport.io.pciex.* */
-static fab_erpt_tbl_t fab_pcie_ce_erpt_tbl[] = {
-	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,
-	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,
-	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,
-	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,
-	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,
-	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,
-	NULL, NULL, NULL
-};
-
-/* Translate Fabric ereports to ereport.io.pciex.* */
-static fab_erpt_tbl_t fab_pcie_ue_erpt_tbl[] = {
-	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,
-	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,
-	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,
-	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,
-	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,
-	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,
-	PCIEX_CTO,	PCIE_AER_UCE_TO,		PCI_TARG_MA,
-	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,
-	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,
-	PCIEX_CA,	PCIE_AER_UCE_CA,		PCI_TARG_REC_TA,
-#ifdef sparc
-	PCIEX_UR,	PCIE_AER_UCE_UR,		PCI_TARG_MA,
-#endif
-	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		PCI_TARG_MDPE,
-	NULL, NULL, NULL
-};
-
-/* Translate Fabric ereports to ereport.io.pciex.* */
-static fab_erpt_tbl_t fab_pcie_sue_erpt_tbl[] = {
-	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		PCI_TARG_REC_TA,
-	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		PCI_TARG_MA,
-	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		PCI_TARG_REC_TA,
-#ifdef sparc
-	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		PCI_TARG_MA,
-#endif
-	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,
-	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	PCI_TARG_REC_TA,
-	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	PCI_TARG_MDPE,
-	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	PCI_TARG_MDPE,
-	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	PCI_TARG_MDPE,
-	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,
-	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	PCI_TARG_MDPE,
-	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,
-	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,
-	NULL, NULL, NULL
-};
-
-/* Translate Fabric ereports to ereport.io.pcix.* */
-static fab_erpt_tbl_t fab_pcix_erpt_tbl[] = {
-	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,
-	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,
-	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,
-	NULL, NULL, NULL
-};
-static fab_erpt_tbl_t *fab_pcix_bdg_erpt_tbl = fab_pcix_erpt_tbl;
-
-/* Translate Fabric ereports to ereport.io.pcix.sec-* */
-static fab_erpt_tbl_t fab_pcix_bdg_sec_erpt_tbl[] = {
-	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,
-	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,
-	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,
-	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,
-	NULL, NULL, NULL
-};
-
-/* Translate Fabric ereports to ereport.io.pciex.* */
-static fab_erpt_tbl_t fab_pcie_nadv_erpt_tbl[] = {
-#ifdef sparc
-	PCIEX_UR,		PCIE_DEVSTS_UR_DETECTED,	NULL,
-#endif
-	PCIEX_FAT,		PCIE_DEVSTS_FE_DETECTED,	NULL,
-	PCIEX_NONFAT,		PCIE_DEVSTS_NFE_DETECTED,	NULL,
-	PCIEX_CORR,		PCIE_DEVSTS_CE_DETECTED,	NULL,
-	NULL, NULL, NULL
-};
-
-/* Translate Fabric ereports to ereport.io.pciex.* */
-static fab_erpt_tbl_t fab_pcie_rc_erpt_tbl[] = {
-	PCIEX_RC_FE_MSG,	PCIE_AER_RE_STS_FE_MSGS_RCVD,	NULL,
-	PCIEX_RC_NFE_MSG,	PCIE_AER_RE_STS_NFE_MSGS_RCVD,	NULL,
-	PCIEX_RC_CE_MSG,	PCIE_AER_RE_STS_CE_RCVD,	NULL,
-	PCIEX_RC_MCE_MSG,	PCIE_AER_RE_STS_MUL_CE_RCVD,	NULL,
-	PCIEX_RC_MUE_MSG,	PCIE_AER_RE_STS_MUL_FE_NFE_RCVD, NULL,
-	NULL, NULL, NULL
-};
-
-/*
- * Translate Fabric ereports to pseudo ereport.io.pciex.* RC Fabric Messages.
- * If the RP is not a PCIe compliant RP or does not support AER, rely on the
- * leaf fabric ereport to help create a xxx_MSG ereport coming from the RC.
- */
-static fab_erpt_tbl_t fab_pcie_fake_rc_erpt_tbl[] = {
-	PCIEX_RC_FE_MSG,	PCIE_DEVSTS_FE_DETECTED,	NULL,
-	PCIEX_RC_NFE_MSG,	PCIE_DEVSTS_NFE_DETECTED,	NULL,
-	PCIEX_RC_CE_MSG,	PCIE_DEVSTS_CE_DETECTED,	NULL,
-	NULL, NULL, NULL,
-};
-
-static fab_err_tbl_t *fab_master_err_tbl;
-
-/*
- * Translation tables for converting fire error bits into "pci" ereports.
- * <Fire Bit>
- * <pci ereport Class>
- * <pci error status reg>
- * <pci bridge status reg>
- * <pci target class>
- */
-#define	FAB_FIRE_PEC_BIT(fb) "ereport.io." PCIEX_FIRE "." FIRE_PEC_ ## fb
-#define	FAB_FIRE_DMC_BIT(fb) "ereport.io." PCIEX_FIRE "." FIRE_DMC_ ## fb
-#define	FAB_N2_DMU_BIT(fb) "ereport.io.n2.dmu." fb
-#define	FAB_OB_PEC_BIT(fb) "ereport.io." PCIEX_OBERON "." FIRE_PEC_ ## fb
-
-#define	FAB_FIRE_UE(fb, bit, sts, bdg) \
-	FAB_FIRE_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, sts, bdg
-#define	FAB_OB_UE(fb, bit, sts, bdg) \
-	FAB_OB_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, sts, bdg
-static fab_fire_tbl_t fab_fire_pec_ue_tbl[] = {
-	FAB_FIRE_UE(UR,	 UR,	   PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(UC,	 UC,	   PCI_STAT_S_SYSERR,	0),
-	FAB_OB_UE(ECRC,	 ECRC,	   PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(CTO, TO,	   PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(ROF, RO,	   PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(MFP, MTLP,	   PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(PP,	 PTLP,	   PCI_STAT_S_PERROR,
-	    (PCI_STAT_S_SYSERR | PCI_STAT_PERROR)),
-	FAB_FIRE_UE(FCP, FCP,	   PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(DLP, DLP,	   PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(TE,	 TRAINING, PCI_STAT_S_SYSERR,	0),
-	FAB_FIRE_UE(CA,	 CA,	   PCI_STAT_S_TARG_AB,
-	    PCI_STAT_S_TARG_AB),
-	NULL, NULL, NULL,
-};
-
-#define	FAB_FIRE_CE(fb, bit) \
-	FAB_FIRE_PEC_BIT(fb), PCIE_AER_CE_ ## bit, 0, 0
-static fab_fire_tbl_t fab_fire_pec_ce_tbl[] = {
-	FAB_FIRE_CE(RTO,	REPLAY_TO),
-	FAB_FIRE_CE(RNR,	REPLAY_ROLLOVER),
-	FAB_FIRE_CE(BDP,	BAD_DLLP),
-	FAB_FIRE_CE(BTP,	BAD_TLP),
-	FAB_FIRE_CE(RE,		RECEIVER_ERR),
-	NULL, NULL, NULL,
-};
-
-/*
- * WUC/RUC will need to be special cased for the target ereports, because you
- * need to decode the tlp log.
- */
-#define	FAB_FIRE_WUCRUC(fb) \
-	FAB_FIRE_PEC_BIT(fb), 0, 0, (PCI_STAT_R_MAST_AB | PCI_STAT_S_SYSERR)
-#define	FAB_FIRE_OE(fb, bit) \
-	FAB_FIRE_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, PCI_STAT_S_SYSERR, 0
-#define	FAB_OB_OE(fb, bit) \
-	FAB_FIRE_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, PCI_STAT_S_SYSERR, 0
-static fab_fire_tbl_t fab_fire_pec_oe_tbl[] = {
-	FAB_FIRE_WUCRUC(WUC),
-	FAB_FIRE_WUCRUC(RUC),
-	FAB_FIRE_OE(ERU, DLP),
-	FAB_FIRE_OE(ERO, DLP),
-	FAB_FIRE_OE(EMP, DLP),
-	FAB_FIRE_OE(EPE, DLP),
-	NULL, NULL, NULL,
-};
-
-#define	FAB_FIRE_DMC(fb) \
-	FAB_FIRE_DMC_BIT(fb), PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB
-#define	FAB_N2_DMU(fb) \
-	FAB_N2_DMU_BIT(fb), PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB
-static fab_fire_tbl_t fab_fire_dmc_tbl[] = {
-	FAB_FIRE_DMC(BYP_ERR),
-	FAB_FIRE_DMC(BYP_OOR),
-	FAB_FIRE_DMC(TRN_OOR),
-	FAB_FIRE_DMC(TTE_INV),
-	FAB_FIRE_DMC(TTE_PRT),
-	FAB_N2_DMU("iotsbdesc_inv"),
-	FAB_N2_DMU("sun4v_adj_va_uf"),
-	FAB_N2_DMU("sun4v_inv_pg_sz"),
-	FAB_N2_DMU("sun4v_key_err"),
-	FAB_N2_DMU("sun4v_va_oor"),
-	NULL, NULL, NULL
-};
-
-static fmd_xprt_t *fab_fmd_xprt = NULL;	/* FMD transport layer handle */
-static char fab_buf[FM_MAX_CLASS];
-static boolean_t fab_xlate_fake_rp = B_TRUE;
-
-#define	HAS_PROP(node, name) xmlHasProp(node, (const xmlChar *)name)
-#define	GET_PROP(node, name) ((char *)xmlGetProp(node, (const xmlChar *)name))
-#define	FREE_PROP(prop) xmlFree((xmlChar *)prop)
-#define	STRCMP(s1, s2) (strcmp((const char *)s1, (const char *)s2) == 0)
-
-#define	FAB_LOOKUP(sz, name, field) \
-	(void) nvlist_lookup_uint ## sz(nvl, name, field)
-/* ARGSUSED */
-static void
-fab_pci_fabric_to_data(fmd_hdl_t *hdl, nvlist_t *nvl, fab_data_t *data) {
-	data->nvl = nvl;
-
-	/* Generic PCI device information */
-	FAB_LOOKUP(16,	"bdf",			&data->bdf);
-	FAB_LOOKUP(16,	"device_id",		&data->device_id);
-	FAB_LOOKUP(16,	"vendor_id",		&data->vendor_id);
-	FAB_LOOKUP(8,	"rev_id",		&data->rev_id);
-	FAB_LOOKUP(16,	"dev_type",		&data->dev_type);
-	FAB_LOOKUP(16,	"pcie_off",		&data->pcie_off);
-	FAB_LOOKUP(16,	"pcix_off",		&data->pcix_off);
-	FAB_LOOKUP(16,	"aer_off",		&data->aer_off);
-	FAB_LOOKUP(16,	"ecc_ver",		&data->ecc_ver);
-
-	/* Misc ereport information */
-	FAB_LOOKUP(32,	"remainder",		&data->remainder);
-	FAB_LOOKUP(32,	"severity",		&data->severity);
-
-	/* PCI registers */
-	FAB_LOOKUP(16,	"pci_status",		&data->pci_err_status);
-	FAB_LOOKUP(16,	"pci_command",		&data->pci_cfg_comm);
-
-	/* PCI bridge registers */
-	FAB_LOOKUP(16,	"pci_bdg_sec_status",	&data->pci_bdg_sec_stat);
-	FAB_LOOKUP(16,	"pci_bdg_ctrl",		&data->pci_bdg_ctrl);
-
-	/* PCIx registers */
-	FAB_LOOKUP(32,	"pcix_status",		&data->pcix_status);
-	FAB_LOOKUP(16,	"pcix_command",		&data->pcix_command);
-
-	/* PCIx ECC Registers */
-	FAB_LOOKUP(16,	"pcix_ecc_control_0",	&data->pcix_ecc_control_0);
-	FAB_LOOKUP(16,	"pcix_ecc_status_0",	&data->pcix_ecc_status_0);
-	FAB_LOOKUP(32,	"pcix_ecc_fst_addr_0",	&data->pcix_ecc_fst_addr_0);
-	FAB_LOOKUP(32,	"pcix_ecc_sec_addr_0",	&data->pcix_ecc_sec_addr_0);
-	FAB_LOOKUP(32,	"pcix_ecc_attr_0",	&data->pcix_ecc_attr_0);
-
-	/* PCIx ECC Bridge Registers */
-	FAB_LOOKUP(16,	"pcix_ecc_control_1",	&data->pcix_ecc_control_1);
-	FAB_LOOKUP(16,	"pcix_ecc_status_1",	&data->pcix_ecc_status_1);
-	FAB_LOOKUP(32,	"pcix_ecc_fst_addr_1",	&data->pcix_ecc_fst_addr_1);
-	FAB_LOOKUP(32,	"pcix_ecc_sec_addr_1",	&data->pcix_ecc_sec_addr_1);
-	FAB_LOOKUP(32,	"pcix_ecc_attr_1",	&data->pcix_ecc_attr_1);
-
-	/* PCIx Bridge */
-	FAB_LOOKUP(32,	"pcix_bdg_status",	&data->pcix_bdg_stat);
-	FAB_LOOKUP(16,	"pcix_bdg_sec_status",	&data->pcix_bdg_sec_stat);
-
-	/* PCIe registers */
-	FAB_LOOKUP(16,	"pcie_status",		&data->pcie_err_status);
-	FAB_LOOKUP(16,	"pcie_command",		&data->pcie_err_ctl);
-	FAB_LOOKUP(32,	"pcie_dev_cap",		&data->pcie_dev_cap);
-
-	/* PCIe AER registers */
-	FAB_LOOKUP(32,	"pcie_adv_ctl",		&data->pcie_adv_ctl);
-	FAB_LOOKUP(32,	"pcie_ue_status",	&data->pcie_ue_status);
-	FAB_LOOKUP(32,	"pcie_ue_mask",		&data->pcie_ue_mask);
-	FAB_LOOKUP(32,	"pcie_ue_sev",		&data->pcie_ue_sev);
-	FAB_LOOKUP(32,	"pcie_ue_hdr0",		&data->pcie_ue_hdr[0]);
-	FAB_LOOKUP(32,	"pcie_ue_hdr1",		&data->pcie_ue_hdr[1]);
-	FAB_LOOKUP(32,	"pcie_ue_hdr2",		&data->pcie_ue_hdr[2]);
-	FAB_LOOKUP(32,	"pcie_ue_hdr3",		&data->pcie_ue_hdr[3]);
-	FAB_LOOKUP(32,	"pcie_ce_status",	&data->pcie_ce_status);
-	FAB_LOOKUP(32,	"pcie_ce_mask",		&data->pcie_ce_mask);
-	FAB_LOOKUP(32,	"pcie_ue_tgt_trans",	&data->pcie_ue_tgt_trans);
-	FAB_LOOKUP(64,	"pcie_ue_tgt_addr",	&data->pcie_ue_tgt_addr);
-	FAB_LOOKUP(16,	"pcie_ue_tgt_bdf",	&data->pcie_ue_tgt_bdf);
-
-	/* PCIe BDG AER registers */
-	FAB_LOOKUP(32,	"pcie_sue_adv_ctl",	&data->pcie_sue_ctl);
-	FAB_LOOKUP(32,	"pcie_sue_status",	&data->pcie_sue_status);
-	FAB_LOOKUP(32,	"pcie_sue_mask",	&data->pcie_sue_mask);
-	FAB_LOOKUP(32,	"pcie_sue_sev",		&data->pcie_sue_sev);
-	FAB_LOOKUP(32,	"pcie_sue_hdr0",	&data->pcie_sue_hdr[0]);
-	FAB_LOOKUP(32,	"pcie_sue_hdr1",	&data->pcie_sue_hdr[1]);
-	FAB_LOOKUP(32,	"pcie_sue_hdr2",	&data->pcie_sue_hdr[2]);
-	FAB_LOOKUP(32,	"pcie_sue_hdr3",	&data->pcie_sue_hdr[3]);
-	FAB_LOOKUP(32,	"pcie_sue_tgt_trans",	&data->pcie_sue_tgt_trans);
-	FAB_LOOKUP(64,	"pcie_sue_tgt_addr",	&data->pcie_sue_tgt_addr);
-	FAB_LOOKUP(16,	"pcie_sue_tgt_bdf",	&data->pcie_sue_tgt_bdf);
-
-	/* PCIe RP registers */
-	FAB_LOOKUP(32,	"pcie_rp_status",	&data->pcie_rp_status);
-	FAB_LOOKUP(16,	"pcie_rp_control",	&data->pcie_rp_ctl);
-
-	/* PCIe RP AER registers */
-	FAB_LOOKUP(32,	"pcie_adv_rp_status",	&data->pcie_rp_err_status);
-	FAB_LOOKUP(32,	"pcie_adv_rp_command",	&data->pcie_rp_err_cmd);
-	FAB_LOOKUP(16,	"pcie_adv_rp_ce_src_id", &data->pcie_rp_ce_src_id);
-	FAB_LOOKUP(16,	"pcie_adv_rp_ue_src_id", &data->pcie_rp_ue_src_id);
-
-	/*
-	 * If the system has a PCIe complaint RP with AER, turn off translating
-	 * fake RP ereports.
-	 */
-	if (fab_xlate_fake_rp &&
-	    (data->dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) &&
-	    data->aer_off)
-		fab_xlate_fake_rp = B_FALSE;
-}
-
-/* ARGSUSED */
-static void
-fab_fire_to_data(fmd_hdl_t *hdl, nvlist_t *nvl, fab_data_t *data) {
-	data->nvl = nvl;
-
-	/* Always Root Complex */
-	data->dev_type = PCIE_PCIECAP_DEV_TYPE_ROOT;
-
-	data->pcie_ue_sev = (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |
-	    PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP);
-}
-
-/* ARGSUSED */
-static int
-fab_prep_basic_erpt(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *erpt,
-    boolean_t isRC) {
-	uint64_t	*now;
-	uint64_t	ena;
-	uint_t		nelem;
-	nvlist_t	*detector, *new_detector;
-	char		rcpath[255];
-	int		err = 0;
-
-	/* Grab the tod, ena and detector(FMRI) */
-	err |= nvlist_lookup_uint64_array(nvl, "__tod", &now, &nelem);
-	err |= nvlist_lookup_uint64(nvl, "ena", &ena);
-	err |= nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector);
-	if (err)
-		return (err);
-
-	/* Make a copy of the detector */
-	err = nvlist_dup(detector, &new_detector, NV_UNIQUE_NAME);
-	if (err)
-		return (err);
-
-	/* Copy the tod and ena to erpt */
-	(void) nvlist_add_uint64(erpt, FM_EREPORT_ENA, ena);
-	(void) nvlist_add_uint64_array(erpt, "__tod", now, nelem);
-
-	/*
-	 * Create the correct ROOT FMRI from PCIe leaf fabric ereports.	 Used
-	 * only by fab_prep_fake_rc_erpt.  See the fab_pciex_fake_rc_erpt_tbl
-	 * comments for more information.
-	 */
-	if (isRC && fab_get_rcpath(hdl, nvl, rcpath)) {
-		/* Create the correct PCIe RC new_detector aka FMRI */
-		(void) nvlist_remove(new_detector, FM_FMRI_DEV_PATH,
-		    DATA_TYPE_STRING);
-		(void) nvlist_add_string(new_detector, FM_FMRI_DEV_PATH,
-		    rcpath);
-	}
-
-	/* Copy the FMRI to erpt */
-	(void) nvlist_add_nvlist(erpt, FM_EREPORT_DETECTOR, new_detector);
-
-	nvlist_free(new_detector);
-	return (err);
-}
-
-static void
-fab_send_tgt_erpt(fmd_hdl_t *hdl, fab_data_t *data, const char *class,
-    boolean_t isPrimary)
-{
-	nvlist_t	*nvl = data->nvl;
-	nvlist_t	*erpt;
-	char		*fmri = NULL;
-	uint32_t	tgt_trans;
-	uint64_t	tgt_addr;
-	uint16_t	tgt_bdf;
-
-	if (isPrimary) {
-		tgt_trans = data->pcie_ue_tgt_trans;
-		tgt_addr = data->pcie_ue_tgt_addr;
-		tgt_bdf = data->pcie_ue_tgt_bdf;
-	} else {
-		tgt_trans = data->pcie_sue_tgt_trans;
-		tgt_addr = data->pcie_sue_tgt_addr;
-		tgt_bdf = data->pcie_sue_tgt_bdf;
-	}
-
-	fmd_hdl_debug(hdl, "Sending Target Ereport: "
-	    "type 0x%x addr 0x%llx fltbdf 0x%x\n",
-	    tgt_trans, tgt_addr, tgt_bdf);
-
-	if (!tgt_trans)
-		return;
-
-	if ((tgt_trans == PF_ADDR_PIO) && tgt_addr)
-		fmri = fab_find_addr(hdl, nvl, tgt_addr);
-	else if ((tgt_trans == PF_ADDR_CFG) && tgt_bdf)
-		fmri = fab_find_bdf(hdl, nvl, tgt_bdf);
-
-	if (fmri) {
-		uint64_t	*now;
-		uint64_t	ena;
-		uint_t		nelem;
-		nvlist_t	*detector;
-		int		err = 0;
-
-		/* Allocate space for new erpt */
-		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
-			goto done;
-
-		/* Generate the target ereport class */
-		(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-		    PCI_ERROR_SUBCLASS, class);
-		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-		/* Grab the tod, ena and detector(FMRI) */
-		err |= nvlist_lookup_uint64_array(nvl, "__tod", &now, &nelem);
-		err |= nvlist_lookup_uint64(nvl, "ena", &ena);
-
-		/* Copy the tod and ena to erpt */
-		(void) nvlist_add_uint64(erpt, FM_EREPORT_ENA, ena);
-		(void) nvlist_add_uint64_array(erpt, "__tod", now, nelem);
-
-		/* Create the correct FMRI */
-		if (nvlist_alloc(&detector, NV_UNIQUE_NAME, 0) != 0) {
-			nvlist_free(erpt);
-			goto done;
-		}
-		(void) nvlist_add_uint8(detector, FM_VERSION,
-		    FM_DEV_SCHEME_VERSION);
-		(void) nvlist_add_string(detector, FM_FMRI_SCHEME,
-		    FM_FMRI_SCHEME_DEV);
-		(void) nvlist_add_string(detector, FM_FMRI_DEV_PATH, fmri);
-		(void) nvlist_add_nvlist(erpt, FM_EREPORT_DETECTOR, detector);
-		nvlist_free(detector);
-
-		/* Add the address payload */
-		(void) nvlist_add_uint64(erpt, PCI_PA, tgt_addr);
-
-		fmd_hdl_debug(hdl, "Sending target ereport: %s 0x%x\n",
-		    fab_buf, tgt_addr);
-		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
-		if (fmd_xprt_error(hdl, fab_fmd_xprt))
-			goto done;
-		xmlFree(fmri);
-	} else {
-		fmd_hdl_debug(hdl, "Cannot find Target FMRI addr:0x%llx",
-		    tgt_addr);
-	}
-
-	return;
-done:
-	if (fmri)
-		xmlFree(fmri);
-	fmd_hdl_debug(hdl, "Failed to send Target PCI ereport\n");
-}
-
-static void
-fab_send_erpt(fmd_hdl_t *hdl, fab_data_t *data, fab_err_tbl_t *tbl)
-{
-	fab_erpt_tbl_t	*erpt_tbl, *entry;
-	nvlist_t	*erpt;
-	uint32_t	reg;
-
-	erpt_tbl = tbl->erpt_tbl;
-	if (tbl->reg_size == 16) {
-		reg = (uint32_t)*((uint16_t *)
-		    ((uint32_t)data + tbl->reg_offset));
-	} else {
-		reg = *((uint32_t *)((uint32_t)data + tbl->reg_offset));
-	}
-
-	for (entry = erpt_tbl; entry->err_class; entry++) {
-		if (!(reg & entry->reg_bit))
-			continue;
-
-		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
-			goto done;
-		if (tbl->fab_prep(hdl, data, erpt, entry) != 0) {
-			fmd_hdl_debug(hdl, "Prepping ereport failed\n");
-			nvlist_free(erpt);
-			continue;
-		}
-
-		fmd_hdl_debug(hdl, "Sending ereport: %s 0x%x\n", fab_buf, reg);
-		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
-		if (fmd_xprt_error(hdl, fab_fmd_xprt)) {
-			fmd_hdl_debug(hdl, "Failed to send PCI ereport\n");
-			return;
-		}
-	}
-
-	return;
-done:
-	fmd_hdl_debug(hdl, "Failed  to send PCI ereport\n");
-}
-
-static int
-fab_prep_pci_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCI_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint16(erpt, PCI_CONFIG_STATUS, data->pci_err_status);
-	(void) nvlist_add_uint16(erpt, PCI_CONFIG_COMMAND, data->pci_cfg_comm);
-
-	return (err);
-}
-
-static int
-fab_prep_pci_bdg_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s-%s",
-	    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint16(erpt, PCI_SEC_CONFIG_STATUS,
-	    data->pci_bdg_sec_stat);
-	(void) nvlist_add_uint16(erpt, PCI_BCNTRL, data->pci_bdg_ctrl);
-
-	return (err);
-}
-
-static int
-fab_prep_pci_bdg_ctl_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCI_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint16(erpt, PCI_SEC_CONFIG_STATUS,
-	    data->pci_bdg_sec_stat);
-	(void) nvlist_add_uint16(erpt, PCI_BCNTRL, data->pci_bdg_ctrl);
-
-	return (err);
-}
-
-
-static int
-fab_prep_pcie_ce_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIEX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint16(erpt, PCIEX_DEVSTS_REG, data->pcie_err_status);
-	(void) nvlist_add_uint32(erpt, PCIEX_CE_STATUS_REG,
-	    data->pcie_ce_status);
-
-	return (err);
-}
-
-static int
-fab_prep_pcie_ue_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	uint32_t first_err = 1 << (data->pcie_adv_ctl &
-	    PCIE_AER_CTL_FST_ERR_PTR_MASK);
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIEX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+fmd_xprt_t *fab_fmd_xprt;	/* FMD transport layer handle */
+char fab_buf[FM_MAX_CLASS];
 
-	(void) nvlist_add_uint16(erpt, PCIEX_DEVSTS_REG, data->pcie_err_status);
-	(void) nvlist_add_uint32(erpt, PCIEX_UE_STATUS_REG,
-	    data->pcie_ue_status);
-	(void) nvlist_add_uint32(erpt, PCIEX_UE_SEV_REG, data->pcie_ue_sev);
-	(void) nvlist_add_uint32(erpt, PCIEX_ADV_CTL, data->pcie_adv_ctl);
-
-	fmd_hdl_debug(hdl, "Bit 0x%x First Err 0x%x", tbl->reg_bit, first_err);
-
-	if ((tbl->reg_bit == first_err) && data->pcie_ue_tgt_bdf) {
-		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
-		    data->pcie_ue_tgt_bdf);
-		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
-	} else {
-		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID, 0);
-		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_FALSE);
-	}
-
-	if ((tbl->reg_bit == first_err) && data->pcie_ue_tgt_trans) {
-		if (tbl->tgt_class)
-			fab_send_tgt_erpt(hdl, data, tbl->tgt_class, B_TRUE);
-	}
-
-	return (err);
-}
-
-static int
-fab_prep_pcie_sue_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	uint32_t first_err = 1 << (data->pcie_sue_ctl &
-	    PCIE_AER_SCTL_FST_ERR_PTR_MASK);
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIEX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint32(erpt, PCIEX_SEC_UE_STATUS,
-	    data->pcie_sue_status);
-
-	fmd_hdl_debug(hdl, "Bit 0x%x First Err 0x%x", tbl->reg_bit, first_err);
-
-	if ((tbl->reg_bit == first_err) && data->pcie_sue_tgt_bdf) {
-		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
-		    data->pcie_sue_tgt_bdf);
-		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
-	} else {
-		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID, 0);
-		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_FALSE);
-	}
-
-	if ((tbl->reg_bit == first_err) && data->pcie_sue_tgt_trans) {
-		if (tbl->tgt_class)
-			fab_send_tgt_erpt(hdl, data, tbl->tgt_class, B_FALSE);
-	}
-
-	return (err);
-}
-
-static int
-fab_prep_pcix_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = 0;
-
-	/* Only send if this is not a bridge */
-	if (!data->pcix_status || data->pcix_bdg_sec_stat)
-		return (1);
-
-	err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint8(erpt, PCIX_COMMAND, data->pcix_command);
-	(void) nvlist_add_uint32(erpt, PCIX_STATUS, data->pcix_status);
-
-	return (err);
-}
-
-static void
-fab_send_pcix_ecc_erpt(fmd_hdl_t *hdl, fab_data_t *data)
-{
-	nvlist_t *erpt;
-	int ecc_phase = (data->pcix_ecc_status_0 & PCI_PCIX_ECC_PHASE) >> 0x4;
-	int ecc_corr = data->pcix_ecc_status_0 & PCI_PCIX_ECC_CORR;
-	int sec_ue = data->pcix_ecc_status_0 & PCI_PCIX_ECC_S_UE;
-	int sec_ce = data->pcix_ecc_status_0 & PCI_PCIX_ECC_S_CE;
-	uint32_t ctlstat = (data->pcix_ecc_control_0 << 16) |
-	    data->pcix_ecc_status_0;
-
-	switch (ecc_phase) {
-	case PCI_PCIX_ECC_PHASE_NOERR:
-		break;
-	case PCI_PCIX_ECC_PHASE_FADDR:
-	case PCI_PCIX_ECC_PHASE_SADDR:
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s", PCIX_ERROR_SUBCLASS,
-		    ecc_corr ? PCIX_ECC_CE_ADDR : PCIX_ECC_UE_ADDR);
-		break;
-	case PCI_PCIX_ECC_PHASE_ATTR:
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s", PCIX_ERROR_SUBCLASS,
-		    ecc_corr ? PCIX_ECC_CE_ATTR : PCIX_ECC_UE_ATTR);
-		break;
-	case PCI_PCIX_ECC_PHASE_DATA32:
-	case PCI_PCIX_ECC_PHASE_DATA64:
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s", PCIX_ERROR_SUBCLASS,
-		    ecc_corr ? PCIX_ECC_CE_DATA : PCIX_ECC_UE_DATA);
-		break;
-	}
-
-	if (ecc_phase) {
-		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
-			goto done;
-		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-		(void) nvlist_add_uint16(erpt, PCIX_COMMAND,
-		    data->pcix_command);
-		(void) nvlist_add_uint32(erpt, PCIX_STATUS, data->pcix_status);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
-		    data->pcix_ecc_attr_0);
-		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
-		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
-		if (fmd_xprt_error(hdl, fab_fmd_xprt))
-			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
-	}
-
-	if (sec_ce || sec_ue) {
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s", PCIX_ERROR_SUBCLASS,
-		    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
-		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
-			goto done;
-		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-		(void) nvlist_add_uint16(erpt, PCIX_COMMAND,
-		    data->pcix_command);
-		(void) nvlist_add_uint32(erpt, PCIX_STATUS, data->pcix_status);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
-		    data->pcix_ecc_attr_0);
-		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
-		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
-		if (fmd_xprt_error(hdl, fab_fmd_xprt))
-			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
-	}
-
-	return;
-done:
-	fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
-}
-
-static int
-fab_prep_pcix_bdg_sec_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s%s",
-	    PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
-	    data->pcix_bdg_sec_stat);
-	(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT, data->pcix_bdg_stat);
-
-	return (err);
-}
-
-static int
-fab_prep_pcix_bdg_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
-	    data->pcix_bdg_sec_stat);
-	(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT, data->pcix_bdg_stat);
-
-	return (err);
-}
-
-static void
-fab_send_pcix_bdg_ecc_erpt(fmd_hdl_t *hdl, fab_data_t *data)
-{
-	nvlist_t *erpt;
-	int ecc_phase = (data->pcix_ecc_status_1 & PCI_PCIX_ECC_PHASE) >> 0x4;
-	int ecc_corr = data->pcix_ecc_status_1 & PCI_PCIX_ECC_CORR;
-	int sec_ue = data->pcix_ecc_status_1 & PCI_PCIX_ECC_S_UE;
-	int sec_ce = data->pcix_ecc_status_1 & PCI_PCIX_ECC_S_CE;
-	uint32_t ctlstat = (data->pcix_ecc_control_1 << 16) |
-	    data->pcix_ecc_status_1;
-
-	switch (ecc_phase) {
-	case PCI_PCIX_ECC_PHASE_NOERR:
-		break;
-	case PCI_PCIX_ECC_PHASE_FADDR:
-	case PCI_PCIX_ECC_PHASE_SADDR:
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
-		    ecc_corr ? PCIX_ECC_CE_ADDR : PCIX_ECC_UE_ADDR);
-		break;
-	case PCI_PCIX_ECC_PHASE_ATTR:
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
-		    ecc_corr ? PCIX_ECC_CE_ATTR : PCIX_ECC_UE_ATTR);
-		break;
-	case PCI_PCIX_ECC_PHASE_DATA32:
-	case PCI_PCIX_ECC_PHASE_DATA64:
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
-		    ecc_corr ? PCIX_ECC_CE_DATA : PCIX_ECC_UE_DATA);
-		break;
-	}
-	if (ecc_phase) {
-		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
-			goto done;
-		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-		(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
-		    data->pcix_bdg_sec_stat);
-		(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT,
-		    data->pcix_bdg_stat);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
-		    data->pcix_ecc_attr_1);
-		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
-		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
-		if (fmd_xprt_error(hdl, fab_fmd_xprt))
-			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
-	}
-
-	if (sec_ce || sec_ue) {
-		(void) snprintf(fab_buf, FM_MAX_CLASS,
-		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
-		    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
-		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
-			goto done;
-		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-		(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
-		    data->pcix_bdg_sec_stat);
-		(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT,
-		    data->pcix_bdg_stat);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
-		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
-		    data->pcix_ecc_attr_1);
-		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
-		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
-		if (fmd_xprt_error(hdl, fab_fmd_xprt))
-			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
-	}
-	return;
-done:
-	fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
-}
-
-static int
-fab_prep_pcie_nadv_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	int err = 0;
-
-	/* Don't send this for PCI device, Root Ports, or PCIe with AER */
-	if ((data->dev_type == PCIE_PCIECAP_DEV_TYPE_PCI_DEV) ||
-	    (data->dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) ||
-	    data->aer_off)
-		return (1);
-
-	err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIEX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint16(erpt, PCIEX_DEVSTS_REG, data->pcie_err_status);
-
-	return (err);
-}
-
-static int
-fab_prep_pcie_rc_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	uint32_t status = data->pcie_rp_err_status;
-	int err = 0;
-	int isFE = 0, isNFE = 0;
-
-	fmd_hdl_debug(hdl, "XLATE RP Error Class %s", class);
-
-	if (!data->aer_off)
-		return (-1);
-
-	/* Only send a FE Msg if the 1st UE error is FE */
-	if (STRCMP(class, PCIEX_RC_FE_MSG))
-		if (!(status & PCIE_AER_RE_STS_FIRST_UC_FATAL))
-			return (-1);
-		else
-			isFE = 1;
-
-	/* Only send a NFE Msg is the 1st UE error is NFE */
-	if (STRCMP(class, PCIEX_RC_NFE_MSG))
-		if (status & PCIE_AER_RE_STS_FIRST_UC_FATAL)
-			return (-1);
-		else
-			isNFE = 1;
-
-	fmd_hdl_debug(hdl, "XLATE RP Error");
-
-	err |= fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIEX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	(void) nvlist_add_uint32(erpt, PCIEX_ROOT_ERRSTS_REG, status);
-	if ((isFE || isNFE) && data->pcie_rp_ue_src_id) {
-		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
-		    data->pcie_rp_ue_src_id);
-		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
-	}
-	if (STRCMP(class, PCIEX_RC_CE_MSG) && data->pcie_rp_ce_src_id) {
-		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
-		    data->pcie_rp_ce_src_id);
-		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
-	}
-
-	return (err);
-}
-
-static int
-fab_prep_pcie_fake_rc_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    fab_erpt_tbl_t *tbl)
-{
-	const char *class = tbl->err_class;
-	uint32_t rc_err_sts = 0;
-	int err = 0;
-
-	/*
-	 * Don't send this for PCI device or Root Ports.  Only send it on
-	 * systems with non-compliant RPs.
-	 */
-	if ((data->dev_type == PCIE_PCIECAP_DEV_TYPE_PCI_DEV) ||
-	    (data->dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) ||
-	    (!fab_xlate_fake_rp))
-		return (-1);
-
-	err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_TRUE);
-
-	/* Generate an ereport for this error bit. */
-	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
-	    PCIEX_ERROR_SUBCLASS, class);
-	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
-
-	/* Send PCIe RC Ereports */
-	if (data->pcie_err_status & PCIE_DEVSTS_CE_DETECTED) {
-		rc_err_sts |= PCIE_AER_RE_STS_CE_RCVD;
-	}
-
-	/* NFE/FE src id takes precedence over CE src id */
-	if (data->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
-		rc_err_sts |= PCIE_AER_RE_STS_NFE_MSGS_RCVD;
-		rc_err_sts |= PCIE_AER_RE_STS_FE_NFE_RCVD;
-	}
-	if (data->pcie_err_status & PCIE_DEVSTS_FE_DETECTED) {
-		rc_err_sts |= PCIE_AER_RE_STS_FE_MSGS_RCVD;
-		rc_err_sts |= PCIE_AER_RE_STS_FE_NFE_RCVD;
-	}
-	if ((data->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) &&
-	    (data->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)) {
-		rc_err_sts |= PCIE_AER_RE_STS_FIRST_UC_FATAL;
-		rc_err_sts |= PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
-	}
-
-	(void) nvlist_add_uint32(erpt, PCIEX_ROOT_ERRSTS_REG, rc_err_sts);
-
-	if (!(rc_err_sts & PCIE_AER_RE_STS_MUL_FE_NFE_RCVD)) {
-		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID, data->bdf);
-		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
-	}
-
-	return (err);
-}
-
-static int
-fab_xlate_fire_ce(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class)
-{
-	fab_fire_tbl_t	*entry;
-	uint64_t	reg;
-
-	for (entry = fab_fire_pec_ce_tbl; entry->err_class; entry++) {
-		if (STRCMP(class, entry->err_class))
-			goto send;
-	}
-
-	return (0);
-
-send:
-	fmd_hdl_debug(hdl, "Translate Fire CE %s\n", class);
-
-	/* Fill in the device status register */
-	data->pcie_err_status = PCIE_DEVSTS_CE_DETECTED;
-
-	/* Fill in the AER CE register */
-	if (nvlist_lookup_uint64(erpt, "tlu-cess", &reg) == 0) {
-		data->pcie_ce_status = (uint32_t)reg | (uint32_t)(reg >> 32);
-	}
-
-	return (1);
-}
-
-static int
-fab_xlate_fire_ue(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class)
-{
-	fab_fire_tbl_t	*entry;
-	uint64_t	reg;
-	uint32_t	temp;
-	pcie_tlp_hdr_t	*hdr;
-
-	for (entry = fab_fire_pec_ue_tbl; entry->err_class; entry++) {
-		if (STRCMP(class, entry->err_class))
-			goto send;
-	}
-
-	return (0);
-
-send:
-	fmd_hdl_debug(hdl, "Translate Fire UE %s\n", class);
-
-	/* Fill in PCI Status Register */
-	data->pci_err_status = entry->pci_err_sts;
-	data->pci_bdg_sec_stat = entry->pci_bdg_sts;
-
-	/* Fill in the device status register */
-	if (entry->fire_bit & data->pcie_ue_sev)
-		data->pcie_err_status = PCIE_DEVSTS_FE_DETECTED;
-	else
-		data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;
-
-	if (entry->fire_bit == PCIE_AER_UCE_UR)
-		data->pcie_err_status |= PCIE_DEVSTS_UR_DETECTED;
-
-	/* Fill in the AER UE register */
-	if (nvlist_lookup_uint64(erpt, "tlu-uess", &reg) == 0) {
-		data->pcie_ue_status = (uint32_t)reg | (uint32_t)(reg >> 32);
-	}
-
-	/* Fill in the AER Control register */
-	if ((reg & (uint64_t)entry->fire_bit) &&
-	    nvlist_lookup_boolean(erpt, "primary")) {
-		temp = entry->fire_bit;
-		for (data->pcie_adv_ctl = (uint32_t)-1; temp;
-		    data->pcie_adv_ctl++)
-			temp = temp >> 1;
-	}
-
-	/* If CTO create target information */
-	if (entry->fire_bit == PCIE_AER_UCE_TO &&
-	    nvlist_lookup_boolean(erpt, "primary")) {
-		if (nvlist_lookup_uint64(erpt, "tlu-tueh1l", &reg) == 0) {
-			data->pcie_ue_hdr[0] = (uint32_t)(reg >> 32);
-			data->pcie_ue_hdr[1] = (uint32_t)(reg);
-		}
-		if (nvlist_lookup_uint64(erpt, "tlu-tueh2l", &reg) == 0) {
-			data->pcie_ue_hdr[2] = (uint32_t)(reg >> 32);
-			data->pcie_ue_hdr[3] = (uint32_t)(reg);
-		}
-
-		hdr = (pcie_tlp_hdr_t *)(&data->pcie_ue_hdr[0]);
-		switch (hdr->type) {
-		case PCIE_TLP_TYPE_IO:
-		case PCIE_TLP_TYPE_MEM:
-		case PCIE_TLP_TYPE_MEMLK:
-			data->pcie_ue_tgt_trans = PF_ADDR_PIO;
-			if (hdr->fmt & 0x1) {
-				data->pcie_ue_tgt_addr = reg;
-			} else {
-				data->pcie_ue_tgt_addr = data->pcie_ue_hdr[2];
-			}
-			break;
-		case PCIE_TLP_TYPE_CFG0:
-		case PCIE_TLP_TYPE_CFG1:
-			data->pcie_ue_tgt_trans = PF_ADDR_CFG;
-			data->pcie_ue_tgt_bdf = data->pcie_ue_hdr[2] >> 16;
-			break;
-		}
-	}
-
-	/* Fill in the AER Header registers */
-	if (nvlist_lookup_uint64(erpt, "tlu-rueh1l", &reg) == 0) {
-		data->pcie_ue_hdr[0] = (uint32_t)(reg >> 32);
-		data->pcie_ue_hdr[1] = (uint32_t)(reg);
-	}
-	if (nvlist_lookup_uint64(erpt, "tlu-rueh2l", &reg) == 0) {
-		data->pcie_ue_hdr[2] = (uint32_t)(reg >> 32);
-		data->pcie_ue_hdr[3] = (uint32_t)(reg);
-	}
-
-	return (1);
-}
-
-static int
-fab_xlate_fire_oe(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class)
-{
-	fab_fire_tbl_t	*entry;
-	uint64_t	reg;
-
-	for (entry = fab_fire_pec_oe_tbl; entry->err_class; entry++) {
-		if (STRCMP(class, entry->err_class))
-			goto send;
-	}
-
-	return (0);
-
-send:
-	fmd_hdl_debug(hdl, "Translate Fire OE %s\n", class);
-
-	/* Fill in PCI Status Register */
-	if (entry->fire_bit) {
-		data->pci_err_status = entry->pci_err_sts;
-		data->pci_bdg_sec_stat = entry->pci_bdg_sts;
-	} else {
-		if (nvlist_lookup_uint64(erpt, "tlu-roeeh1l", &reg) == 0) {
-			data->pcie_ue_hdr[0] = (uint32_t)(reg >> 32);
-			data->pcie_ue_hdr[1] = (uint32_t)(reg);
-		}
-		if (nvlist_lookup_uint64(erpt, "tlu-roeeh2l", &reg) == 0) {
-			data->pcie_ue_hdr[2] = (uint32_t)(reg >> 32);
-			data->pcie_ue_hdr[3] = (uint32_t)(reg);
-		}
-
-		if (((pcie_tlp_hdr_t *)(&data->pcie_ue_hdr[0]))->type ==
-		    PCIE_TLP_TYPE_CPL) {
-			pcie_cpl_t *cpl = (pcie_cpl_t *)&data->pcie_ue_hdr[1];
-			switch (cpl->status) {
-			case PCIE_CPL_STS_UR:
-				data->pci_err_status = 0;
-				data->pci_bdg_sec_stat = PCI_STAT_R_MAST_AB |
-				    PCI_STAT_S_SYSERR;
-				break;
-			case PCIE_CPL_STS_CA:
-				data->pci_err_status = 0;
-				data->pci_bdg_sec_stat = PCI_STAT_R_TARG_AB |
-				    PCI_STAT_S_SYSERR;
-				break;
-			}
-		}
-	}
-
-	/* Fill in the device status register */
-	if (entry->fire_bit & data->pcie_ue_sev)
-		data->pcie_err_status = PCIE_DEVSTS_FE_DETECTED;
-	else
-		data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;
-
-	/* Fill in the AER UE register */
-	data->pcie_ue_status = entry->fire_bit;
-
-	return (1);
-}
-
-static int
-fab_xlate_fire_dmc(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
-    const char *class)
-{
-	fab_fire_tbl_t	*entry;
-	uint64_t	reg;
-	uint32_t	temp;
-
-	for (entry = fab_fire_dmc_tbl; entry->err_class; entry++) {
-		fmd_hdl_debug(hdl, "Matching %s\n", entry->err_class);
-		if (STRCMP(class, entry->err_class) &&
-		    nvlist_lookup_boolean(erpt, "primary"))
-				goto send;
-	}
-
-	return (0);
-
-send:
-	fmd_hdl_debug(hdl, "Translate Fire DMC %s\n", class);
-
-	/* Fill in PCI Status Register */
-	data->pci_err_status = entry->pci_err_sts;
-	data->pci_bdg_sec_stat = entry->pci_bdg_sts;
-
-	/* Fill in the device status register */
-	data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;
-
-	/* Fill in the AER UE register */
-	data->pcie_ue_status = entry->fire_bit;
-
-	/* Fill in the AER Control register */
-	temp = entry->fire_bit;
-	for (data->pcie_adv_ctl = (uint32_t)-1; temp; data->pcie_adv_ctl++)
-		temp = temp >> 1;
-
-	/* Fill in the AER Header registers */
-	if (nvlist_lookup_uint64(erpt, "mmu-tfsr", &reg) == 0) {
-		fmd_hdl_debug(hdl, "tfsr 0x%llx\n", reg);
-		/* Get the trans type */
-		temp = (reg & 0x3F0000) >> 16;
-		data->pcie_ue_hdr[0] = (uint32_t)(temp << 24);
-		data->pcie_ue_tgt_trans = PF_ADDR_DMA;
-		/* Get the req id */
-		temp = (reg & 0xFFFF);
-		data->pcie_ue_hdr[1] = (uint32_t)(temp << 16);
-		data->pcie_ue_tgt_bdf = temp;
-	}
-
-	if (nvlist_lookup_uint64(erpt, "mmu-tfar", &reg) == 0) {
-		fmd_hdl_debug(hdl, "tfar 0x%llx\n", reg);
-		/* Get the address */
-		data->pcie_ue_hdr[2] = reg;
-		data->pcie_ue_hdr[3] = 0;
-		data->pcie_ue_tgt_addr = reg;
-	}
-
-	fmd_hdl_debug(hdl, "HEADER 0 0x%x\n", data->pcie_ue_hdr[0]);
-	fmd_hdl_debug(hdl, "HEADER 1 0x%x\n", data->pcie_ue_hdr[1]);
-	fmd_hdl_debug(hdl, "HEADER 2 0x%x\n", data->pcie_ue_hdr[2]);
-	fmd_hdl_debug(hdl, "HEADER 3 0x%x\n", data->pcie_ue_hdr[3]);
-
-	return (1);
-}
-
-static void
-fab_xlate_pcie_erpts(fmd_hdl_t *hdl, fab_data_t *data)
-{
-	fab_err_tbl_t *tbl;
-
-	fmd_hdl_debug(hdl, "Sending Ereports Now");
-
-	/* Go through the error logs and send the relavant reports */
-	for (tbl = fab_master_err_tbl; tbl->erpt_tbl; tbl++) {
-		fab_send_erpt(hdl, data, tbl);
-	}
-
-	/* Send PCI-X ECC Ereports */
-	fab_send_pcix_ecc_erpt(hdl, data);
-	fab_send_pcix_bdg_ecc_erpt(hdl, data);
-}
-
-static void
-fab_xlate_fire_erpts(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *nvl,
-    const char *class)
-{
-	if (fmd_nvl_class_match(hdl, nvl, "ereport.io.fire.pec.*")) {
-		if (fab_xlate_fire_ce(hdl, data, nvl, class))
-			return;
-
-		if (fab_xlate_fire_ue(hdl, data, nvl, class))
-			return;
-
-		if (fab_xlate_fire_oe(hdl, data, nvl, class))
-			return;
-	} else if (fmd_nvl_class_match(hdl, nvl, "ereport.io.fire.dmc.*") ||
-	    fmd_nvl_class_match(hdl, nvl, "ereport.io.n2.dmu.*")) {
-		if (fab_xlate_fire_dmc(hdl, data, nvl, class))
-			return;
-	}
-}
+/* Static FM Topo XML Format and XML XPath Context  */
+static xmlDocPtr	fab_doc = NULL;
+xmlXPathContextPtr	fab_xpathCtx = NULL;
+static int		fab_valid_topo = 0;
 
 static void
 fab_update_topo(fmd_hdl_t *hdl)
@@ -1553,452 +75,33 @@
 	fab_valid_topo = 1;
 }
 
-#define	FAB_HC2DEV_QUERY_SIZE_MIN 160
-#define	FAB_HC2DEV_QUERY_SIZE(sz) \
-	((sz + FAB_HC2DEV_QUERY_SIZE_MIN) * sizeof (char))
-
-static boolean_t
-fab_hc2dev(fmd_hdl_t *hdl, nvlist_t *detector, char **dev_path,
-    uint_t *dev_path_size) {
-	xmlXPathObjectPtr xpathObj;
-	xmlNodeSetPtr	nodes;
-	char 		*query, *query_end, *temp;
-	uint_t 		i, size;
-	size_t		query_size = 0;
-	nvlist_t	**hcl;
-
-	if (nvlist_lookup_nvlist_array(detector, FM_FMRI_HC_LIST, &hcl,
-		&size) != 0)
-		goto fail;
-
-	for (i = 0; i < size; i++) {
-		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME, &temp) != 0)
-			goto fail;
-		query_size += strlen(temp);
-		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &temp) != 0)
-			goto fail;
-		query_size += strlen(temp);
-		/* Adjust for '=' and '/' later */
-		query_size += 2;
-	}
-
-	query = fmd_hdl_alloc(hdl, FAB_HC2DEV_QUERY_SIZE(query_size),
-	    FMD_SLEEP);
-	(void) sprintf(query, "//propval[@name='resource' and "
-	    "contains(substring(@value, string-length(@value) - %d), '",
-	    query_size);
-
-	query_end = query;
-	query_end += strlen(query);
-
-	for (i = 0; i < size; i++) {
-		(void) nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME, &temp);
-		(void) snprintf(query_end, query_size, "%s=", temp);
-		query_end += strlen(temp) + 1;
-		(void) nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &temp);
-		(void) snprintf(query_end, query_size, "%s", temp);
-		query_end += strlen(temp);
-		if (i != (size - 1)) {
-			(void) sprintf(query_end++, "/");
-		}
-	}
-
-	(void) sprintf(query_end, "')]/parent::*/following-sibling::*/"
-	    "propval[@name='dev']/@value");
-
-	fmd_hdl_debug(hdl, "xpathObj query %s\n", query);
-
-	xpathObj = xmlXPathEvalExpression((const xmlChar *)query,
-	    fab_xpathCtx);
-	fmd_hdl_free(hdl, query, FAB_HC2DEV_QUERY_SIZE(query_size));
-
-	if (xpathObj == NULL)
-		goto fail;
-
-	fmd_hdl_debug(hdl, "xpathObj 0x%p type %d\n", xpathObj,
-	    xpathObj->type);
-	nodes = xpathObj->nodesetval;
-
-	if (nodes) {
-		temp = (char *)xmlNodeGetContent(nodes->nodeTab[0]);
-		fmd_hdl_debug(hdl, "HC Dev Path: %s\n", temp);
-		*dev_path_size = strlen(temp) + 1;
-		*dev_path = fmd_hdl_alloc(hdl, *dev_path_size, FMD_SLEEP);
-		(void) strlcpy(*dev_path, (char *)temp, *dev_path_size);
-		xmlFree(temp);
-		xmlXPathFreeObject(xpathObj);
-		return (B_TRUE);
-	}
-	xmlXPathFreeObject(xpathObj);
-fail:
-	return (B_FALSE);
-}
-
-/* ARGSUSED */
-static boolean_t
-fab_get_rcpath(fmd_hdl_t *hdl, nvlist_t *nvl, char *rcpath) {
-	nvlist_t	*detector;
-	char		*path, *scheme;
-	uint_t		size;
-
-	if (nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector) != 0)
-		goto fail;
-	if (nvlist_lookup_string(detector, FM_FMRI_SCHEME, &scheme) != 0)
-		goto fail;
-
-	if (STRCMP(scheme, FM_FMRI_SCHEME_DEV)) {
-		if (nvlist_lookup_string(detector, FM_FMRI_DEV_PATH,
-			&path) != 0)
-			goto fail;
-		(void) strncpy(rcpath, path, FM_MAX_CLASS);
-	} else if (STRCMP(scheme, FM_FMRI_SCHEME_HC)) {
-		/*
-		 * This should only occur for ereports that come from the RC
-		 * itself.  In this case convert HC scheme to dev path.
-		 */
-		if (fab_hc2dev(hdl, detector, &path, &size)) {
-			(void) strncpy(rcpath, path, FM_MAX_CLASS);
-			fmd_hdl_free(hdl, path, size);
-		} else {
-			goto fail;
-		}
-	} else {
-		return (B_FALSE);
-	}
-
-	/*
-	 * Extract the RC path by taking the first device in the dev path
-	 *
-	 * /pci@0,0/pci8086,3605@2/pci8086,3500@0/pci8086,3514@1/pci8086,105e@0
-	 * - to -
-	 * /pci@0,0
-	 */
-	path = strchr(rcpath + 1, '/');
-	if (path)
-		path[0] = '\0';
-
-	return (B_TRUE);
-fail:
-	return (B_FALSE);
-}
-
-static char *
-fab_find_bdf(fmd_hdl_t *hdl, nvlist_t *nvl, pcie_req_id_t bdf) {
-	xmlXPathObjectPtr xpathObj;
-	xmlNodeSetPtr	nodes;
-	xmlChar 	*retval;
-	char		query[500];
-	int		bus, dev, fn;
-	char		rcpath[255];
-
-	if (bdf != (uint16_t)-1) {
-		bus = (bdf & PCIE_REQ_ID_BUS_MASK) >> PCIE_REQ_ID_BUS_SHIFT;
-		dev = (bdf & PCIE_REQ_ID_DEV_MASK) >> PCIE_REQ_ID_DEV_SHIFT;
-		fn = (bdf & PCIE_REQ_ID_FUNC_MASK) >> PCIE_REQ_ID_FUNC_SHIFT;
-	}
-
-	if (!fab_get_rcpath(hdl, nvl, rcpath))
-		goto fail;
-
-	/*
-	 * Explanation of the XSL XPATH Query
-	 * Line 1: Look at all nodes with the node name "propval"
-	 * Line 2-3: See if the "value" of the node ends with correct PCIEx BDF
-	 * Line 4-5: See if the "value" of the node ends with correct PCI BDF
-	 * Line 6: Go up one level to the parent of the current node
-	 * Line 7: See if child node contains "ASRU" with the same PCIe Root
-	 * Line 8: Traverse up the parent and the other siblings and look for
-	 *	   the io "propgroup" and get the value of the dev "propval"
-	 */
-	(void) snprintf(query, sizeof (query), "//propval["
-	    "contains(substring(@value, string-length(@value) - 34), "
-	    "'pciexbus=%d/pciexdev=%d/pciexfn=%d') or "
-	    "contains(substring(@value, string-length(@value) - 28), "
-	    "'pcibus=%d/pcidev=%d/pcifn=%d')"
-	    "]/parent::"
-	    "*/propval[@name='ASRU' and contains(@value, '%s')]"
-	    "/parent::*/following-sibling::*[@name='io']/propval[@name='dev']/"
-	    "@value", bus, dev, fn, bus, dev, fn, rcpath);
-
-	fmd_hdl_debug(hdl, "xpathObj query %s\n", query);
-
-	xpathObj = xmlXPathEvalExpression((const xmlChar *)query, fab_xpathCtx);
-
-	if (xpathObj == NULL)
-		goto fail;
-
-	fmd_hdl_debug(hdl, "xpathObj 0x%p type %d\n", xpathObj, xpathObj->type);
-
-	nodes = xpathObj->nodesetval;
-	if (nodes) {
-		retval = xmlNodeGetContent(nodes->nodeTab[0]);
-		fmd_hdl_debug(hdl, "BDF Dev Path: %s\n", retval);
-		xmlXPathFreeObject(xpathObj);
-		return ((char *)retval);
-	}
-fail:
-	return (NULL);
-}
-
-static char *
-fab_find_addr(fmd_hdl_t *hdl, nvlist_t *nvl, uint64_t addr) {
-	xmlXPathObjectPtr xpathObj;
-	xmlNodeSetPtr nodes;
-	xmlNodePtr devNode;
-	char *retval;
-	char query[500];
-	int size, i, j;
-	uint32_t prop[50];
-	char *token;
-	pci_regspec_t *assign_p;
-	uint64_t low, hi;
-	char rcpath[255];
-
-	if (!fab_get_rcpath(hdl, nvl, rcpath))
-		goto fail;
-
-	(void) snprintf(query, sizeof (query), "//propval["
-	    "@name='ASRU' and contains(@value, '%s')]/"
-	    "parent::*/following-sibling::*[@name='pci']/"
-	    "propval[@name='assigned-addresses']", rcpath);
-
-	fmd_hdl_debug(hdl, "xpathObj query %s\n", query);
-
-	xpathObj = xmlXPathEvalExpression((const xmlChar *)query, fab_xpathCtx);
-
-	if (xpathObj == NULL)
-		goto fail;
-
-	fmd_hdl_debug(hdl, "xpathObj 0x%p type %d\n", xpathObj, xpathObj->type);
-
-	nodes = xpathObj->nodesetval;
-	size = (nodes) ? nodes->nodeNr : 0;
-
-	/* Decode the list of assigned addresses xml nodes for each device */
-	for (i = 0; i < size; i++) {
-		char *tprop;
-
-		devNode = nodes->nodeTab[i];
-		if (!HAS_PROP(devNode, "value"))
-			continue;
-
-		/* Convert "string" assigned-addresses to pci_regspec_t */
-		j = 0;
-		tprop = GET_PROP(devNode, "value");
-		for (token = strtok(tprop, " "); token;
-		    token = strtok(NULL, " ")) {
-			prop[j++] = strtoul(token, (char **)NULL, 16);
-		}
-		prop[j] = (uint32_t)-1;
-		FREE_PROP(tprop);
-
-		/* Check if address belongs to this device */
-		for (assign_p = (pci_regspec_t *)prop;
-		    assign_p->pci_phys_hi != (uint_t)-1; assign_p++) {
-			low = assign_p->pci_phys_low;
-			hi = low + assign_p->pci_size_low;
-			if ((addr < hi) && (addr >= low)) {
-				fmd_hdl_debug(hdl, "Found Address\n");
-				goto found;
-			}
-		}
-	}
-	goto fail;
-
-found:
-	/* Traverse up the xml tree and back down to find the right propgroup */
-	for (devNode = devNode->parent->parent->children;
-	    devNode; devNode = devNode->next) {
-		char	*tprop;
-
-		tprop = GET_PROP(devNode, "name");
-		if (STRCMP(devNode->name, "propgroup") &&
-		    STRCMP(tprop, "io")) {
-			FREE_PROP(tprop);
-			goto propgroup;
-		}
-		FREE_PROP(tprop);
-	}
-	goto fail;
-
-propgroup:
-	/* Retrive the "dev" propval and return */
-	for (devNode = devNode->children; devNode; devNode = devNode->next) {
-		char	*tprop;
-
-		tprop = GET_PROP(devNode, "name");
-		if (STRCMP(devNode->name, "propval") &&
-		    STRCMP(tprop, "dev")) {
-			FREE_PROP(tprop);
-			retval = GET_PROP(devNode, "value");
-			fmd_hdl_debug(hdl, "Addr Dev Path: %s\n", retval);
-			xmlXPathFreeObject(xpathObj);
-			return (retval);
-		}
-		FREE_PROP(tprop);
-	}
-fail:
-	if (xpathObj != NULL)
-		xmlXPathFreeObject(xpathObj);
-	return (NULL);
-}
-
-static void
-fab_pr(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl) {
-	nvpair_t *nvp;
-
-	for (nvp = nvlist_next_nvpair(nvl, NULL);
-	    nvp != NULL;
-	    nvp = nvlist_next_nvpair(nvl, nvp)) {
-
-		data_type_t type = nvpair_type(nvp);
-		const char *name = nvpair_name(nvp);
-
-		boolean_t b;
-		uint8_t i8;
-		uint16_t i16;
-		uint32_t i32;
-		uint64_t i64;
-		char *str;
-		nvlist_t *cnv;
-
-		nvlist_t **nvlarr;
-		uint_t arrsize;
-		int arri;
-
-
-		if (STRCMP(name, FM_CLASS))
-			continue; /* already printed by caller */
-
-		fmd_hdl_debug(hdl, " %s=", name);
-
-		switch (type) {
-		case DATA_TYPE_BOOLEAN:
-			fmd_hdl_debug(hdl, "DATA_TYPE_BOOLEAN 1");
-			break;
-
-		case DATA_TYPE_BOOLEAN_VALUE:
-			(void) nvpair_value_boolean_value(nvp, &b);
-			fmd_hdl_debug(hdl, "DATA_TYPE_BOOLEAN_VALUE %d",
-			    b ? "1" : "0");
-			break;
-
-		case DATA_TYPE_BYTE:
-			(void) nvpair_value_byte(nvp, &i8);
-			fmd_hdl_debug(hdl, "DATA_TYPE_BYTE 0x%x", i8);
-			break;
-
-		case DATA_TYPE_INT8:
-			(void) nvpair_value_int8(nvp, (void *)&i8);
-			fmd_hdl_debug(hdl, "DATA_TYPE_INT8 0x%x", i8);
-			break;
-
-		case DATA_TYPE_UINT8:
-			(void) nvpair_value_uint8(nvp, &i8);
-			fmd_hdl_debug(hdl, "DATA_TYPE_UINT8 0x%x", i8);
-			break;
-
-		case DATA_TYPE_INT16:
-			(void) nvpair_value_int16(nvp, (void *)&i16);
-			fmd_hdl_debug(hdl, "DATA_TYPE_INT16 0x%x", i16);
-			break;
-
-		case DATA_TYPE_UINT16:
-			(void) nvpair_value_uint16(nvp, &i16);
-			fmd_hdl_debug(hdl, "DATA_TYPE_UINT16 0x%x", i16);
-			break;
-
-		case DATA_TYPE_INT32:
-			(void) nvpair_value_int32(nvp, (void *)&i32);
-			fmd_hdl_debug(hdl, "DATA_TYPE_INT32 0x%x", i32);
-			break;
-
-		case DATA_TYPE_UINT32:
-			(void) nvpair_value_uint32(nvp, &i32);
-			fmd_hdl_debug(hdl, "DATA_TYPE_UINT32 0x%x", i32);
-			break;
-
-		case DATA_TYPE_INT64:
-			(void) nvpair_value_int64(nvp, (void *)&i64);
-			fmd_hdl_debug(hdl, "DATA_TYPE_INT64 0x%llx",
-			    (u_longlong_t)i64);
-			break;
-
-		case DATA_TYPE_UINT64:
-			(void) nvpair_value_uint64(nvp, &i64);
-			fmd_hdl_debug(hdl, "DATA_TYPE_UINT64 0x%llx",
-			    (u_longlong_t)i64);
-			break;
-
-		case DATA_TYPE_HRTIME:
-			(void) nvpair_value_hrtime(nvp, (void *)&i64);
-			fmd_hdl_debug(hdl, "DATA_TYPE_HRTIME 0x%llx",
-			    (u_longlong_t)i64);
-			break;
-
-		case DATA_TYPE_STRING:
-			(void) nvpair_value_string(nvp, &str);
-			fmd_hdl_debug(hdl, "DATA_TYPE_STRING \"%s\"",
-			    str ? str : "<NULL>");
-			break;
-
-		case DATA_TYPE_NVLIST:
-			fmd_hdl_debug(hdl, "[");
-			(void) nvpair_value_nvlist(nvp, &cnv);
-			fab_pr(hdl, NULL, cnv);
-			fmd_hdl_debug(hdl, " ]");
-			break;
-
-		case DATA_TYPE_BOOLEAN_ARRAY:
-		case DATA_TYPE_BYTE_ARRAY:
-		case DATA_TYPE_INT8_ARRAY:
-		case DATA_TYPE_UINT8_ARRAY:
-		case DATA_TYPE_INT16_ARRAY:
-		case DATA_TYPE_UINT16_ARRAY:
-		case DATA_TYPE_INT32_ARRAY:
-		case DATA_TYPE_UINT32_ARRAY:
-		case DATA_TYPE_INT64_ARRAY:
-		case DATA_TYPE_UINT64_ARRAY:
-		case DATA_TYPE_STRING_ARRAY:
-			fmd_hdl_debug(hdl, "[...]");
-			break;
-		case DATA_TYPE_NVLIST_ARRAY:
-			arrsize = 0;
-			(void) nvpair_value_nvlist_array(nvp, &nvlarr,
-			    &arrsize);
-
-			for (arri = 0; arri < arrsize; arri++) {
-				fab_pr(hdl, ep, nvlarr[arri]);
-			}
-
-			break;
-		case DATA_TYPE_UNKNOWN:
-			fmd_hdl_debug(hdl, "<unknown>");
-			break;
-		}
-	}
-}
-
 /*ARGSUSED*/
 static void
 fab_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 {
-	fab_data_t fab_data = {0};
+	nvlist_t *new_nvl;
 
 	if (!fab_valid_topo)
 		fab_update_topo(hdl);
 
-	if (fmd_nvl_class_match(hdl, nvl, "ereport.io.pci.fabric")) {
-		fmd_hdl_debug(hdl, "PCI ereport received: %s\n", class);
-		fab_pci_fabric_to_data(hdl, nvl, &fab_data);
-		fab_xlate_pcie_erpts(hdl, &fab_data);
+	if (nvlist_dup(nvl, &new_nvl, NV_UNIQUE_NAME) != 0) {
+		fmd_hdl_error(hdl, "failed to duplicate event");
+		return;
+	}
+
+	if (fmd_nvl_class_match(hdl, new_nvl, "ereport.io.pci.fabric")) {
+		fab_xlate_fabric_erpts(hdl, new_nvl, class);
 	} else {
-		fab_pr(hdl, ep, nvl);
-		fmd_hdl_debug(hdl, "Fire RC ereport received: %s\n", class);
-		fab_fire_to_data(hdl, nvl, &fab_data);
-		fab_xlate_fire_erpts(hdl, &fab_data, nvl, class);
-		fab_xlate_pcie_erpts(hdl, &fab_data);
+		fab_pr(hdl, ep, new_nvl);
+		if (fmd_nvl_class_match(hdl, new_nvl,
+		    "ereport.io.pciex.rc.epkt")) {
+			fab_xlate_epkt_erpts(hdl, new_nvl, class);
+		} else {
+			fab_xlate_fire_erpts(hdl, new_nvl, class);
+		}
 	}
+
+	nvlist_free(new_nvl);
 }
 
 /* ARGSUSED */
@@ -2022,18 +125,9 @@
 	"Fabric Ereport Translater", "1.0", &fmd_ops, NULL
 };
 
-#define	REG_OFF(reg) ((uint32_t)(uint32_t)&fab_data.reg - (uint32_t)&fab_data)
-#define	SET_TBL(n, err, reg, sz) \
-	fab_master_err_tbl[n].erpt_tbl = fab_ ## err ## _erpt_tbl; \
-	fab_master_err_tbl[n].reg_offset = REG_OFF(reg); \
-	fab_master_err_tbl[n].reg_size = sz; \
-	fab_master_err_tbl[n].fab_prep = fab_prep_ ## err ## _erpt;
-
 void
 _fmd_init(fmd_hdl_t *hdl)
 {
-	fab_data_t fab_data;
-
 	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0)
 		return;
 
@@ -2043,22 +137,7 @@
 	fab_fmd_xprt = fmd_xprt_open(hdl, FMD_XPRT_RDONLY, NULL, NULL);
 	fmd_hdl_debug(hdl, "Fabric Translater Started\n");
 
-	/* Setup the master error table */
-	fab_master_err_tbl = (fab_err_tbl_t *)calloc(13,
-	    sizeof (fab_err_tbl_t));
-
-	SET_TBL(0, pci,			pci_err_status,	    16);
-	SET_TBL(1, pci_bdg,		pci_bdg_sec_stat,   16);
-	SET_TBL(2, pci_bdg_ctl,		pci_bdg_ctrl,	    16);
-	SET_TBL(3, pcie_ce,		pcie_ce_status,	    32);
-	SET_TBL(4, pcie_ue,		pcie_ue_status,	    32);
-	SET_TBL(5, pcie_sue,		pcie_sue_status,    32);
-	SET_TBL(6, pcix,		pcix_status,	    32);
-	SET_TBL(7, pcix_bdg_sec,	pcix_bdg_sec_stat,  16);
-	SET_TBL(8, pcix_bdg,		pcix_bdg_stat,	    32);
-	SET_TBL(9, pcie_nadv,		pcie_err_status,    16);
-	SET_TBL(10, pcie_rc,		pcie_rp_err_status, 32);
-	SET_TBL(11, pcie_fake_rc,	pcie_err_status,    16);
+	fab_setup_master_table();
 }
 
 void
--- a/usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.conf	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.conf	Fri Dec 11 10:41:17 2009 -0800
@@ -37,4 +37,4 @@
 subscribe ereport.io.n2.dmu.sun4v_inv_pg_sz
 subscribe ereport.io.n2.dmu.sun4v_key_err
 subscribe ereport.io.n2.dmu.sun4v_va_oor
-
+subscribe ereport.io.pciex.rc.epkt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/fabric-xlate.h	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,173 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _FABRIC_XLATE_H
+#define	_FABRIC_XLATE_H
+
+#include <fm/fmd_api.h>
+#include <sys/fm/protocol.h>
+#include <sys/nvpair.h>
+#include <sys/types.h>
+#include <sys/pcie.h>
+#include <sys/fm/io/pci.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define	STRCMP(s1, s2) (strcmp((const char *)(s1), (const char *)(s2)) == 0)
+/*
+ * These values are used for the xxx_tgt_trans value in fab_data_t.  They are
+ * originally set in pcie_fault.c and originally defined in pcie_impl.h.
+ */
+#define	PF_ADDR_DMA		(1 << 0)
+#define	PF_ADDR_PIO		(1 << 1)
+#define	PF_ADDR_CFG		(1 << 2)
+
+extern fmd_xprt_t *fab_fmd_xprt;	/* FMD transport layer handle */
+extern char fab_buf[];
+
+/* PCI-E config space data for error handling and fabric ereports */
+typedef struct fab_data {
+	/* Original ereport NVL */
+	nvlist_t	*nvl;
+
+	/* Device Information */
+	uint16_t bdf;
+	uint16_t device_id;
+	uint16_t vendor_id;
+	uint8_t rev_id;
+	uint16_t dev_type;
+	uint16_t pcie_off;
+	uint16_t pcix_off;
+	uint16_t aer_off;
+	uint16_t ecc_ver;
+
+	/* Ereport Information */
+	uint32_t remainder;
+	uint32_t severity;
+
+	/* Error Registers */
+	uint16_t pci_err_status;	/* pci status register */
+	uint16_t pci_cfg_comm;		/* pci command register */
+
+	uint16_t pci_bdg_sec_stat;	/* PCI secondary status reg */
+	uint16_t pci_bdg_ctrl;		/* PCI bridge control reg */
+
+	uint16_t pcix_command;		/* pcix command register */
+	uint32_t pcix_status;		/* pcix status register */
+
+	uint16_t pcix_bdg_sec_stat;	/* pcix bridge secondary status reg */
+	uint32_t pcix_bdg_stat;		/* pcix bridge status reg */
+
+	uint16_t pcix_ecc_control_0;	/* pcix ecc control status reg */
+	uint16_t pcix_ecc_status_0;	/* pcix ecc control status reg */
+	uint32_t pcix_ecc_fst_addr_0;	/* pcix ecc first address reg */
+	uint32_t pcix_ecc_sec_addr_0;	/* pcix ecc second address reg */
+	uint32_t pcix_ecc_attr_0;	/* pcix ecc attributes reg */
+	uint16_t pcix_ecc_control_1;	/* pcix ecc control status reg */
+	uint16_t pcix_ecc_status_1;	/* pcix ecc control status reg */
+	uint32_t pcix_ecc_fst_addr_1;	/* pcix ecc first address reg */
+	uint32_t pcix_ecc_sec_addr_1;	/* pcix ecc second address reg */
+	uint32_t pcix_ecc_attr_1;	/* pcix ecc attributes reg */
+
+	uint16_t pcie_err_status;	/* pcie device status register */
+	uint16_t pcie_err_ctl;		/* pcie error control register */
+	uint32_t pcie_dev_cap;		/* pcie device capabilities register */
+
+	uint32_t pcie_adv_ctl;		/* pcie advanced control reg */
+	uint32_t pcie_ue_status;	/* pcie ue error status reg */
+	uint32_t pcie_ue_mask;		/* pcie ue error mask reg */
+	uint32_t pcie_ue_sev;		/* pcie ue error severity reg */
+	uint32_t pcie_ue_hdr[4];	/* pcie ue header log */
+	uint32_t pcie_ce_status;	/* pcie ce error status reg */
+	uint32_t pcie_ce_mask;		/* pcie ce error mask reg */
+	uint32_t pcie_ue_tgt_trans;	/* Fault trans type from AER Logs */
+	uint64_t pcie_ue_tgt_addr;	/* Fault addr from AER Logs */
+	pcie_req_id_t pcie_ue_tgt_bdf;	/* Fault bdf from SAER Logs */
+	boolean_t pcie_ue_no_tgt_erpt;  /* Don't send target ereports */
+
+	uint32_t pcie_sue_ctl;		/* pcie bridge secondary ue control */
+	uint32_t pcie_sue_status;	/* pcie bridge secondary ue status */
+	uint32_t pcie_sue_mask;		/* pcie bridge secondary ue mask */
+	uint32_t pcie_sue_sev;		/* pcie bridge secondary ue severity */
+	uint32_t pcie_sue_hdr[4];	/* pcie bridge secondary ue hdr log */
+	uint32_t pcie_sue_tgt_trans;	/* Fault trans type from AER Logs */
+	uint64_t pcie_sue_tgt_addr;	/* Fault addr from AER Logs */
+	pcie_req_id_t pcie_sue_tgt_bdf;	/* Fault bdf from SAER Logs */
+
+	uint32_t pcie_rp_status;	/* root complex status register */
+	uint16_t pcie_rp_ctl;		/* root complex control register */
+	uint32_t pcie_rp_err_status;	/* pcie root complex error status reg */
+	uint32_t pcie_rp_err_cmd;	/* pcie root complex error cmd reg */
+	uint16_t pcie_rp_ce_src_id;	/* pcie root complex ce source id */
+	uint16_t pcie_rp_ue_src_id;	/* pcie root complex ue source id */
+} fab_data_t;
+
+typedef struct fab_erpt_tbl {
+	const char	*err_class;	/* Final Ereport Class */
+	uint32_t	reg_bit;	/* Error Bit Mask */
+	/* Pointer to function that prepares the ereport body */
+	const char	*tgt_class;	/* Target Ereport Class */
+} fab_erpt_tbl_t;
+
+typedef struct fab_err_tbl {
+	fab_erpt_tbl_t	*erpt_tbl;	/* ereport table */
+	uint32_t	reg_offset;	/* sts reg for ereport table offset */
+	uint32_t	reg_size;	/* size of the status register */
+	/* Pointer to function that prepares the ereport body */
+	int		(*fab_prep)(fmd_hdl_t *, fab_data_t *, nvlist_t *,
+	    fab_erpt_tbl_t *);
+} fab_err_tbl_t;
+
+extern void fab_setup_master_table(void);
+
+/* Main functions for converting "fabric" ereports */
+extern void fab_xlate_pcie_erpts(fmd_hdl_t *, fab_data_t *);
+extern void fab_xlate_fabric_erpts(fmd_hdl_t *, nvlist_t *, const char *);
+extern void fab_xlate_fire_erpts(fmd_hdl_t *, nvlist_t *, const char *);
+extern void fab_xlate_epkt_erpts(fmd_hdl_t *, nvlist_t *, const char *);
+
+/* Common functions for sending translated ereports */
+extern int fab_prep_basic_erpt(fmd_hdl_t *, nvlist_t *, nvlist_t *, boolean_t);
+extern void fab_send_tgt_erpt(fmd_hdl_t *, fab_data_t *, const char *,
+    boolean_t);
+extern void fab_send_erpt(fmd_hdl_t *hdl, fab_data_t *data, fab_err_tbl_t *tbl);
+
+/* Misc Functions */
+extern void fab_pr(fmd_hdl_t *, fmd_event_t *, nvlist_t *);
+extern boolean_t fab_get_rcpath(fmd_hdl_t *hdl, nvlist_t *nvl, char *rcpath);
+extern char *fab_find_rppath_by_df(fmd_hdl_t *, nvlist_t *, uint8_t);
+extern char *fab_find_rppath_by_devbdf(fmd_hdl_t *, nvlist_t *, pcie_req_id_t);
+extern char *fab_find_addr(fmd_hdl_t *hdl, nvlist_t *nvl, uint64_t addr);
+extern char *fab_find_bdf(fmd_hdl_t *hdl, nvlist_t *nvl, pcie_req_id_t bdf);
+extern boolean_t fab_hc2dev(fmd_hdl_t *, const char *, char **);
+extern boolean_t fab_hc2dev_nvl(fmd_hdl_t *, nvlist_t *, char **);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FABRIC_XLATE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/fx_epkt.c	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,262 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <sys/types.h>
+#include <px_err.h>
+
+#include "fabric-xlate.h"
+
+#define	EPKT_DESC(b, o, p, c, d) (BLOCK_##b << 16 | OP_##o << 12 | \
+    PH_##p << 8 | CND_##c << 4 | DIR_##d)
+
+/* EPKT Table used only for RC/RP errors */
+typedef struct fab_epkt_tbl {
+	uint32_t	epkt_desc;
+	uint32_t	pcie_ue_sts;	/* Equivalent PCIe UE Status */
+	uint16_t	pci_err_sts;	/* Equivalent PCI Error Status */
+	uint16_t	pci_bdg_sts;	/* Equivalent PCI Bridge Status */
+	const char	*tgt_class;	/* Target Ereport Class */
+} fab_epkt_tbl_t;
+
+static fab_epkt_tbl_t fab_epkt_tbl[] = {
+	EPKT_DESC(MMU, XLAT, DATA, INV, RDWR),
+	PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB, 0,
+	EPKT_DESC(MMU, XLAT, ADDR, UNMAP, RDWR),
+	PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB, 0,
+	EPKT_DESC(MMU, XLAT, DATA, PROT, RDWR),
+	PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB, 0,
+
+	EPKT_DESC(INTR, MSI32, DATA, ILL, IRR),
+	PCIE_AER_UCE_MTLP, PCI_STAT_S_SYSERR, 0, 0,
+
+	EPKT_DESC(PORT, PIO, IRR, RCA, WRITE),
+	PCIE_AER_UCE_CA, PCI_STAT_S_SYSERR, PCI_STAT_S_TARG_AB, 0,
+
+	EPKT_DESC(PORT, PIO, IRR, RUR, WRITE),
+	PCIE_AER_UCE_UR, PCI_STAT_S_SYSERR, 0, 0,
+
+	EPKT_DESC(PORT, PIO, IRR, INV, RDWR),
+	PCIE_AER_UCE_MTLP, PCI_STAT_S_SYSERR, 0, 0,
+
+	EPKT_DESC(PORT, PIO, IRR, TO, READ),
+	PCIE_AER_UCE_TO, PCI_STAT_S_SYSERR, 0, PCI_TARG_MA,
+	EPKT_DESC(PORT, PIO, IRR, TO, WRITE),
+	PCIE_AER_UCE_TO, PCI_STAT_S_SYSERR, 0, PCI_TARG_MA,
+
+	EPKT_DESC(PORT, PIO, IRR, UC, IRR),
+	PCIE_AER_UCE_UC, PCI_STAT_S_SYSERR, 0, 0,
+
+	EPKT_DESC(PORT, LINK, FC, TO, IRR),
+	PCIE_AER_UCE_FCP, PCI_STAT_S_SYSERR, 0, 0,
+
+	0, 0, 0, 0, 0
+};
+
+/* ARGSUSED */
+void
+fab_epkt_to_data(fmd_hdl_t *hdl, nvlist_t *nvl, fab_data_t *data)
+{
+	data->nvl = nvl;
+
+	/* Always Root Complex */
+	data->dev_type = PCIE_PCIECAP_DEV_TYPE_ROOT;
+
+	data->pcie_ue_sev = (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |
+	    PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP);
+}
+
+static int
+fab_xlate_epkt(fmd_hdl_t *hdl, fab_data_t *data, px_rc_err_t *epktp)
+{
+	fab_epkt_tbl_t *entry;
+	uint32_t temp;
+
+	temp = *(uint32_t *)&epktp->rc_descr >> 12;
+	for (entry = fab_epkt_tbl; entry->epkt_desc != 0; entry++) {
+		if (entry->epkt_desc == temp)
+			goto send;
+	}
+
+	return (0);
+
+send:
+	fmd_hdl_debug(hdl, "Translate epkt DESC = %#x\n", temp);
+
+	/* Fill in PCI Status Register */
+	data->pci_err_status = entry->pci_err_sts;
+	data->pci_bdg_sec_stat = entry->pci_bdg_sts;
+
+	/* Fill in the device status register */
+	if (epktp->rc_descr.STOP)
+		data->pcie_err_status = PCIE_DEVSTS_FE_DETECTED;
+	else if (epktp->rc_descr.C)
+		data->pcie_err_status = PCIE_DEVSTS_CE_DETECTED;
+	else
+		data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;
+
+	/* Fill in the AER UE register */
+	data->pcie_ue_status = entry->pcie_ue_sts;
+
+	/* Fill in the AER Control register */
+	temp = entry->pcie_ue_sts;
+	for (data->pcie_adv_ctl = (uint32_t)-1; temp; data->pcie_adv_ctl++)
+		temp = temp >> 1;
+
+	/* Send target ereports */
+	data->pcie_ue_no_tgt_erpt = B_TRUE;
+	if (entry->tgt_class && !epktp->rc_descr.STOP) {
+		if (epktp->rc_descr.D) {
+			data->pcie_ue_tgt_trans = PF_ADDR_DMA;
+			data->pcie_ue_tgt_addr = epktp->addr;
+		} else if (epktp->rc_descr.M) {
+			data->pcie_ue_tgt_trans = PF_ADDR_PIO;
+			data->pcie_ue_tgt_addr = epktp->addr;
+		}
+
+		if (data->pcie_ue_tgt_trans)
+			fab_send_tgt_erpt(hdl, data, entry->tgt_class,
+			    B_TRUE);
+	}
+	return (1);
+}
+
+void
+fab_xlate_epkt_erpts(fmd_hdl_t *hdl, nvlist_t *nvl, const char *class)
+{
+	fab_data_t data = {0};
+	px_rc_err_t epkt = {0};
+	pcie_tlp_hdr_t *tlp_hdr;
+	void *ptr;
+	uint8_t ver;
+	int err;
+	char *rppath = NULL;
+	nvlist_t *detector;
+
+	fmd_hdl_debug(hdl, "epkt ereport received: %s\n", class);
+	fab_epkt_to_data(hdl, nvl, &data);
+
+	err = nvlist_lookup_uint8(nvl, "epkt_ver", &ver);
+	err |= nvlist_lookup_uint32(nvl, "desc", (uint32_t *)&epkt.rc_descr);
+	err |= nvlist_lookup_uint32(nvl, "size", &epkt.size);
+	err |= nvlist_lookup_uint64(nvl, "addr", &epkt.addr);
+	err |= nvlist_lookup_uint64(nvl, "hdr1", &epkt.hdr[0]);
+	err |= nvlist_lookup_uint64(nvl, "hdr2", &epkt.hdr[1]);
+	err |= nvlist_lookup_uint64(nvl, "reserved", &epkt.reserved);
+
+	if (err != 0) {
+		fmd_hdl_debug(hdl, "Failed to retrieve all epkt payloads");
+		return;
+	}
+
+	fmd_hdl_debug(hdl, "epkt flags: %c%c%c%c%c%c%c%c%c %s",
+	    epkt.rc_descr.S ? 'S' : '-', epkt.rc_descr.M ? 'M' : '-',
+	    epkt.rc_descr.Q ? 'Q' : '-', epkt.rc_descr.D ? 'D' : '-',
+	    epkt.rc_descr.R ? 'R' : '-', epkt.rc_descr.H ? 'H' : '-',
+	    epkt.rc_descr.C ? 'C' : '-', epkt.rc_descr.I ? 'I' : '-',
+	    epkt.rc_descr.B ? 'B' : '-', epkt.rc_descr.STOP ? "STOP" : "");
+
+	/*
+	 * If the least byte of the 'reserved' is non zero, it is device
+	 * and function of the port
+	 */
+	if (epkt.reserved & 0xff)
+		rppath = fab_find_rppath_by_df(hdl, nvl, epkt.reserved & 0xff);
+
+	if (epkt.rc_descr.H) {
+		data.pcie_ue_hdr[0] = (uint32_t)(epkt.hdr[0] >> 32);
+		data.pcie_ue_hdr[1] = (uint32_t)epkt.hdr[0];
+		data.pcie_ue_hdr[2] = (uint32_t)(epkt.hdr[1] >> 32);
+		data.pcie_ue_hdr[3] = (uint32_t)(epkt.hdr[1]);
+
+		tlp_hdr = (pcie_tlp_hdr_t *)&data.pcie_ue_hdr[0];
+		ptr = &data.pcie_ue_hdr[1];
+		switch (tlp_hdr->type) {
+		case PCIE_TLP_TYPE_IO:
+		case PCIE_TLP_TYPE_MEM:
+		case PCIE_TLP_TYPE_MEMLK:
+		{
+			pcie_mem64_t *pmp = ptr;
+			data.pcie_ue_tgt_trans = PF_ADDR_PIO;
+			data.pcie_ue_tgt_bdf = pmp->rid;
+			if (tlp_hdr->fmt & 0x1)
+				data.pcie_ue_tgt_addr =
+				    ((uint64_t)pmp->addr1 << 32) | pmp->addr0;
+			else
+				data.pcie_ue_tgt_addr =
+				    ((pcie_memio32_t *)ptr)->addr0;
+
+			break;
+		}
+
+		case PCIE_TLP_TYPE_CFG0:
+		case PCIE_TLP_TYPE_CFG1:
+		{
+			pcie_cfg_t *pcp = ptr;
+
+			data.pcie_ue_tgt_trans = PF_ADDR_CFG;
+			data.pcie_ue_tgt_bdf =
+			    (pcp->bus << 8) | (pcp->dev << 3) | pcp->func;
+			break;
+		}
+
+		case PCIE_TLP_TYPE_CPL:
+		case PCIE_TLP_TYPE_CPLLK:
+			data.pcie_ue_tgt_bdf = ((pcie_cpl_t *)ptr)->rid;
+			break;
+		}
+
+		fmd_hdl_debug(hdl, "HEADER 0 0x%x", data.pcie_ue_hdr[0]);
+		fmd_hdl_debug(hdl, "HEADER 1 0x%x", data.pcie_ue_hdr[1]);
+		fmd_hdl_debug(hdl, "HEADER 2 0x%x", data.pcie_ue_hdr[2]);
+		fmd_hdl_debug(hdl, "HEADER 3 0x%x", data.pcie_ue_hdr[3]);
+		fmd_hdl_debug(hdl, "In header bdf = %#hx addr = %#llx",
+		    data.pcie_ue_tgt_bdf,
+		    (uint64_t)data.pcie_ue_tgt_addr);
+
+		/* find the root port to which this error is related */
+		if (data.pcie_ue_tgt_bdf)
+			rppath = fab_find_rppath_by_devbdf(hdl, nvl,
+			    data.pcie_ue_tgt_bdf);
+	}
+
+	/*
+	 * reset the detector in the original ereport to the root port
+	 */
+	if (rppath && nvlist_alloc(&detector, NV_UNIQUE_NAME, 0) == 0) {
+		(void) nvlist_add_string(detector, FM_VERSION,
+		    FM_DEV_SCHEME_VERSION);
+		(void) nvlist_add_string(detector, FM_FMRI_SCHEME,
+		    FM_FMRI_SCHEME_DEV);
+		(void) nvlist_add_string(detector, FM_FMRI_DEV_PATH, rppath);
+		(void) nvlist_remove_all(nvl, FM_EREPORT_DETECTOR);
+		(void) nvlist_add_nvlist(nvl, FM_EREPORT_DETECTOR, detector);
+		nvlist_free(detector);
+	}
+
+	fmd_hdl_strfree(hdl, rppath);
+
+	(void) fab_xlate_epkt(hdl, &data, &epkt);
+	fab_xlate_pcie_erpts(hdl, &data);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/fx_fabric.c	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,834 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <stddef.h>
+#include <strings.h>
+#include <sys/fm/util.h>
+
+#include "fabric-xlate.h"
+
+#define	FAB_LOOKUP(sz, name, field) \
+	(void) nvlist_lookup_uint ## sz(nvl, name, field)
+
+static boolean_t fab_xlate_fake_rp = B_TRUE;
+static fab_err_tbl_t *fab_master_err_tbl;
+
+/*
+ * Translation tables for converting "fabric" error bits into "pci" ereports.
+ * <Ereport Class Name>, <Error Bit Mask>, <Preparation Function>
+ */
+
+/* MACRO for table entries with no TGT ereports */
+#define	NT(class, bit, prep) class, bit, prep, NULL
+/* Translate Fabric ereports to ereport.io.pci.* */
+fab_erpt_tbl_t fab_pci_erpt_tbl[] = {
+	PCI_DET_PERR,		PCI_STAT_PERROR,	NULL,
+	PCI_MDPE,		PCI_STAT_S_PERROR,	NULL,
+	PCI_SIG_SERR,		PCI_STAT_S_SYSERR,	NULL,
+	PCI_MA,			PCI_STAT_R_MAST_AB,	NULL,
+	PCI_REC_TA,		PCI_STAT_R_TARG_AB,	NULL,
+	PCI_SIG_TA,		PCI_STAT_S_TARG_AB,	NULL,
+	NULL, NULL, NULL
+};
+
+/* Translate Fabric ereports to ereport.io.pci.sec-* */
+static fab_erpt_tbl_t fab_pci_bdg_erpt_tbl[] = {
+	PCI_DET_PERR,		PCI_STAT_PERROR,	NULL,
+	PCI_MDPE,		PCI_STAT_S_PERROR,	NULL,
+	PCI_REC_SERR,		PCI_STAT_S_SYSERR,	NULL,
+#ifdef sparc
+	PCI_MA,			PCI_STAT_R_MAST_AB,	NULL,
+#endif
+	PCI_REC_TA,		PCI_STAT_R_TARG_AB,	NULL,
+	PCI_SIG_TA,		PCI_STAT_S_TARG_AB,	NULL,
+	NULL, NULL, NULL, NULL,
+};
+
+
+/* Translate Fabric ereports to ereport.io.pci.dto */
+static fab_erpt_tbl_t fab_pci_bdg_ctl_erpt_tbl[] = {
+	PCI_DTO,	PCI_BCNF_BCNTRL_DTO_STAT,	NULL,
+	NULL, NULL, NULL
+};
+
+/* Translate Fabric ereports to ereport.io.pciex.* */
+static fab_erpt_tbl_t fab_pcie_ce_erpt_tbl[] = {
+	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,
+	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,
+	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,
+	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,
+	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,
+	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,
+	NULL, NULL, NULL
+};
+
+/*
+ * Translate Fabric ereports to ereport.io.pciex.*
+ * The Target Ereports for this section is only used on leaf devices, with the
+ * exception of TO
+ */
+static fab_erpt_tbl_t fab_pcie_ue_erpt_tbl[] = {
+	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,
+	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,
+	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,
+	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,
+	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,
+	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,
+	PCIEX_CTO,	PCIE_AER_UCE_TO,		PCI_TARG_MA,
+	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,
+	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,
+	PCIEX_CA,	PCIE_AER_UCE_CA,		PCI_TARG_REC_TA,
+#ifdef sparc
+	PCIEX_UR,	PCIE_AER_UCE_UR,		PCI_TARG_MA,
+#endif
+	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		PCI_TARG_MDPE,
+	NULL, NULL, NULL
+};
+
+/* Translate Fabric ereports to ereport.io.pciex.* */
+static fab_erpt_tbl_t fab_pcie_sue_erpt_tbl[] = {
+	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		PCI_TARG_REC_TA,
+	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		PCI_TARG_MA,
+	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		PCI_TARG_REC_TA,
+#ifdef sparc
+	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		PCI_TARG_MA,
+#endif
+	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,
+	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	PCI_TARG_REC_TA,
+	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	PCI_TARG_MDPE,
+	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	PCI_TARG_MDPE,
+	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	PCI_TARG_MDPE,
+	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,
+	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	PCI_TARG_MDPE,
+	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,
+	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,
+	NULL, NULL, NULL
+};
+
+/* Translate Fabric ereports to ereport.io.pcix.* */
+static fab_erpt_tbl_t fab_pcix_erpt_tbl[] = {
+	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,
+	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,
+	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,
+	NULL, NULL, NULL
+};
+static fab_erpt_tbl_t *fab_pcix_bdg_erpt_tbl = fab_pcix_erpt_tbl;
+
+/* Translate Fabric ereports to ereport.io.pcix.sec-* */
+static fab_erpt_tbl_t fab_pcix_bdg_sec_erpt_tbl[] = {
+	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,
+	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,
+	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,
+	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,
+	NULL, NULL, NULL
+};
+
+/* Translate Fabric ereports to ereport.io.pciex.* */
+static fab_erpt_tbl_t fab_pcie_nadv_erpt_tbl[] = {
+#ifdef sparc
+	PCIEX_UR,		PCIE_DEVSTS_UR_DETECTED,	NULL,
+#endif
+	PCIEX_FAT,		PCIE_DEVSTS_FE_DETECTED,	NULL,
+	PCIEX_NONFAT,		PCIE_DEVSTS_NFE_DETECTED,	NULL,
+	PCIEX_CORR,		PCIE_DEVSTS_CE_DETECTED,	NULL,
+	NULL, NULL, NULL
+};
+
+/* Translate Fabric ereports to ereport.io.pciex.* */
+static fab_erpt_tbl_t fab_pcie_rc_erpt_tbl[] = {
+	PCIEX_RC_FE_MSG,	PCIE_AER_RE_STS_FE_MSGS_RCVD,	NULL,
+	PCIEX_RC_NFE_MSG,	PCIE_AER_RE_STS_NFE_MSGS_RCVD,	NULL,
+	PCIEX_RC_CE_MSG,	PCIE_AER_RE_STS_CE_RCVD,	NULL,
+	PCIEX_RC_MCE_MSG,	PCIE_AER_RE_STS_MUL_CE_RCVD,	NULL,
+	PCIEX_RC_MUE_MSG,	PCIE_AER_RE_STS_MUL_FE_NFE_RCVD, NULL,
+	NULL, NULL, NULL
+};
+
+/*
+ * Translate Fabric ereports to pseudo ereport.io.pciex.* RC Fabric Messages.
+ * If the RP is not a PCIe compliant RP or does not support AER, rely on the
+ * leaf fabric ereport to help create a xxx_MSG ereport coming from the RC.
+ */
+static fab_erpt_tbl_t fab_pcie_fake_rc_erpt_tbl[] = {
+	PCIEX_RC_FE_MSG,	PCIE_DEVSTS_FE_DETECTED,	NULL,
+	PCIEX_RC_NFE_MSG,	PCIE_DEVSTS_NFE_DETECTED,	NULL,
+	PCIEX_RC_CE_MSG,	PCIE_DEVSTS_CE_DETECTED,	NULL,
+	NULL, NULL, NULL,
+};
+
+/* ARGSUSED */
+void
+fab_pci_fabric_to_data(fmd_hdl_t *hdl, nvlist_t *nvl, fab_data_t *data)
+{
+	data->nvl = nvl;
+
+	/* Generic PCI device information */
+	FAB_LOOKUP(16,	"bdf",			&data->bdf);
+	FAB_LOOKUP(16,	"device_id",		&data->device_id);
+	FAB_LOOKUP(16,	"vendor_id",		&data->vendor_id);
+	FAB_LOOKUP(8,	"rev_id",		&data->rev_id);
+	FAB_LOOKUP(16,	"dev_type",		&data->dev_type);
+	FAB_LOOKUP(16,	"pcie_off",		&data->pcie_off);
+	FAB_LOOKUP(16,	"pcix_off",		&data->pcix_off);
+	FAB_LOOKUP(16,	"aer_off",		&data->aer_off);
+	FAB_LOOKUP(16,	"ecc_ver",		&data->ecc_ver);
+
+	/* Misc ereport information */
+	FAB_LOOKUP(32,	"remainder",		&data->remainder);
+	FAB_LOOKUP(32,	"severity",		&data->severity);
+
+	/* PCI registers */
+	FAB_LOOKUP(16,	"pci_status",		&data->pci_err_status);
+	FAB_LOOKUP(16,	"pci_command",		&data->pci_cfg_comm);
+
+	/* PCI bridge registers */
+	FAB_LOOKUP(16,	"pci_bdg_sec_status",	&data->pci_bdg_sec_stat);
+	FAB_LOOKUP(16,	"pci_bdg_ctrl",		&data->pci_bdg_ctrl);
+
+	/* PCIx registers */
+	FAB_LOOKUP(32,	"pcix_status",		&data->pcix_status);
+	FAB_LOOKUP(16,	"pcix_command",		&data->pcix_command);
+
+	/* PCIx ECC Registers */
+	FAB_LOOKUP(16,	"pcix_ecc_control_0",	&data->pcix_ecc_control_0);
+	FAB_LOOKUP(16,	"pcix_ecc_status_0",	&data->pcix_ecc_status_0);
+	FAB_LOOKUP(32,	"pcix_ecc_fst_addr_0",	&data->pcix_ecc_fst_addr_0);
+	FAB_LOOKUP(32,	"pcix_ecc_sec_addr_0",	&data->pcix_ecc_sec_addr_0);
+	FAB_LOOKUP(32,	"pcix_ecc_attr_0",	&data->pcix_ecc_attr_0);
+
+	/* PCIx ECC Bridge Registers */
+	FAB_LOOKUP(16,	"pcix_ecc_control_1",	&data->pcix_ecc_control_1);
+	FAB_LOOKUP(16,	"pcix_ecc_status_1",	&data->pcix_ecc_status_1);
+	FAB_LOOKUP(32,	"pcix_ecc_fst_addr_1",	&data->pcix_ecc_fst_addr_1);
+	FAB_LOOKUP(32,	"pcix_ecc_sec_addr_1",	&data->pcix_ecc_sec_addr_1);
+	FAB_LOOKUP(32,	"pcix_ecc_attr_1",	&data->pcix_ecc_attr_1);
+
+	/* PCIx Bridge */
+	FAB_LOOKUP(32,	"pcix_bdg_status",	&data->pcix_bdg_stat);
+	FAB_LOOKUP(16,	"pcix_bdg_sec_status",	&data->pcix_bdg_sec_stat);
+
+	/* PCIe registers */
+	FAB_LOOKUP(16,	"pcie_status",		&data->pcie_err_status);
+	FAB_LOOKUP(16,	"pcie_command",		&data->pcie_err_ctl);
+	FAB_LOOKUP(32,	"pcie_dev_cap",		&data->pcie_dev_cap);
+
+	/* PCIe AER registers */
+	FAB_LOOKUP(32,	"pcie_adv_ctl",		&data->pcie_adv_ctl);
+	FAB_LOOKUP(32,	"pcie_ue_status",	&data->pcie_ue_status);
+	FAB_LOOKUP(32,	"pcie_ue_mask",		&data->pcie_ue_mask);
+	FAB_LOOKUP(32,	"pcie_ue_sev",		&data->pcie_ue_sev);
+	FAB_LOOKUP(32,	"pcie_ue_hdr0",		&data->pcie_ue_hdr[0]);
+	FAB_LOOKUP(32,	"pcie_ue_hdr1",		&data->pcie_ue_hdr[1]);
+	FAB_LOOKUP(32,	"pcie_ue_hdr2",		&data->pcie_ue_hdr[2]);
+	FAB_LOOKUP(32,	"pcie_ue_hdr3",		&data->pcie_ue_hdr[3]);
+	FAB_LOOKUP(32,	"pcie_ce_status",	&data->pcie_ce_status);
+	FAB_LOOKUP(32,	"pcie_ce_mask",		&data->pcie_ce_mask);
+	FAB_LOOKUP(32,	"pcie_ue_tgt_trans",	&data->pcie_ue_tgt_trans);
+	FAB_LOOKUP(64,	"pcie_ue_tgt_addr",	&data->pcie_ue_tgt_addr);
+	FAB_LOOKUP(16,	"pcie_ue_tgt_bdf",	&data->pcie_ue_tgt_bdf);
+
+	/* PCIe BDG AER registers */
+	FAB_LOOKUP(32,	"pcie_sue_adv_ctl",	&data->pcie_sue_ctl);
+	FAB_LOOKUP(32,	"pcie_sue_status",	&data->pcie_sue_status);
+	FAB_LOOKUP(32,	"pcie_sue_mask",	&data->pcie_sue_mask);
+	FAB_LOOKUP(32,	"pcie_sue_sev",		&data->pcie_sue_sev);
+	FAB_LOOKUP(32,	"pcie_sue_hdr0",	&data->pcie_sue_hdr[0]);
+	FAB_LOOKUP(32,	"pcie_sue_hdr1",	&data->pcie_sue_hdr[1]);
+	FAB_LOOKUP(32,	"pcie_sue_hdr2",	&data->pcie_sue_hdr[2]);
+	FAB_LOOKUP(32,	"pcie_sue_hdr3",	&data->pcie_sue_hdr[3]);
+	FAB_LOOKUP(32,	"pcie_sue_tgt_trans",	&data->pcie_sue_tgt_trans);
+	FAB_LOOKUP(64,	"pcie_sue_tgt_addr",	&data->pcie_sue_tgt_addr);
+	FAB_LOOKUP(16,	"pcie_sue_tgt_bdf",	&data->pcie_sue_tgt_bdf);
+
+	/* PCIe RP registers */
+	FAB_LOOKUP(32,	"pcie_rp_status",	&data->pcie_rp_status);
+	FAB_LOOKUP(16,	"pcie_rp_control",	&data->pcie_rp_ctl);
+
+	/* PCIe RP AER registers */
+	FAB_LOOKUP(32,	"pcie_adv_rp_status",	&data->pcie_rp_err_status);
+	FAB_LOOKUP(32,	"pcie_adv_rp_command",	&data->pcie_rp_err_cmd);
+	FAB_LOOKUP(16,	"pcie_adv_rp_ce_src_id", &data->pcie_rp_ce_src_id);
+	FAB_LOOKUP(16,	"pcie_adv_rp_ue_src_id", &data->pcie_rp_ue_src_id);
+
+	/*
+	 * If the system has a PCIe compliant RP with AER, turn off translating
+	 * fake RP ereports.
+	 */
+	if (fab_xlate_fake_rp &&
+	    (data->dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) &&
+	    data->aer_off)
+		fab_xlate_fake_rp = B_FALSE;
+}
+
+static int
+fab_prep_pci_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCI_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCI_CONFIG_STATUS, data->pci_err_status);
+	(void) nvlist_add_uint16(erpt, PCI_CONFIG_COMMAND, data->pci_cfg_comm);
+
+	return (err);
+}
+
+static int
+fab_prep_pci_bdg_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s-%s",
+	    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCI_SEC_CONFIG_STATUS,
+	    data->pci_bdg_sec_stat);
+	(void) nvlist_add_uint16(erpt, PCI_BCNTRL, data->pci_bdg_ctrl);
+
+	return (err);
+}
+
+static int
+fab_prep_pci_bdg_ctl_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCI_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCI_SEC_CONFIG_STATUS,
+	    data->pci_bdg_sec_stat);
+	(void) nvlist_add_uint16(erpt, PCI_BCNTRL, data->pci_bdg_ctrl);
+
+	return (err);
+}
+
+
+static int
+fab_prep_pcie_ce_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIEX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCIEX_DEVSTS_REG, data->pcie_err_status);
+	(void) nvlist_add_uint32(erpt, PCIEX_CE_STATUS_REG,
+	    data->pcie_ce_status);
+
+	return (err);
+}
+
+static int
+fab_prep_pcie_ue_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	uint32_t first_err = 1 << (data->pcie_adv_ctl &
+	    PCIE_AER_CTL_FST_ERR_PTR_MASK);
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIEX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCIEX_DEVSTS_REG, data->pcie_err_status);
+	(void) nvlist_add_uint32(erpt, PCIEX_UE_STATUS_REG,
+	    data->pcie_ue_status);
+	(void) nvlist_add_uint32(erpt, PCIEX_UE_SEV_REG, data->pcie_ue_sev);
+	(void) nvlist_add_uint32(erpt, PCIEX_ADV_CTL, data->pcie_adv_ctl);
+
+	fmd_hdl_debug(hdl, "Bit 0x%x First Err 0x%x", tbl->reg_bit, first_err);
+
+	if ((tbl->reg_bit == first_err) && data->pcie_ue_tgt_bdf) {
+		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
+		    data->pcie_ue_tgt_bdf);
+		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
+	} else {
+		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID, 0);
+		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_FALSE);
+	}
+
+	if ((tbl->reg_bit == first_err) && !data->pcie_ue_no_tgt_erpt &&
+	    data->pcie_ue_tgt_trans) {
+		if (tbl->tgt_class)
+			fab_send_tgt_erpt(hdl, data, tbl->tgt_class, B_TRUE);
+	}
+
+	return (err);
+}
+
+/*
+ * Prepare an ereport for a PCIe secondary (bridge-forwarded) uncorrectable
+ * error bit.  Same shape as the primary UE path above, but reads the
+ * secondary UE status/control registers and sends any target ereport as
+ * non-primary (B_FALSE).  Returns the fab_prep_basic_erpt() result.
+ */
+static int
+fab_prep_pcie_sue_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	/* Secondary AER control holds the index of the first error logged. */
+	uint32_t first_err = 1 << (data->pcie_sue_ctl &
+	    PCIE_AER_SCTL_FST_ERR_PTR_MASK);
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIEX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint32(erpt, PCIEX_SEC_UE_STATUS,
+	    data->pcie_sue_status);
+
+	fmd_hdl_debug(hdl, "Bit 0x%x First Err 0x%x", tbl->reg_bit, first_err);
+
+	/* Only the first-logged error carries a trustworthy source BDF. */
+	if ((tbl->reg_bit == first_err) && data->pcie_sue_tgt_bdf) {
+		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
+		    data->pcie_sue_tgt_bdf);
+		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
+	} else {
+		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID, 0);
+		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_FALSE);
+	}
+
+	if ((tbl->reg_bit == first_err) && !data->pcie_ue_no_tgt_erpt &&
+	    data->pcie_sue_tgt_trans) {
+		if (tbl->tgt_class)
+			fab_send_tgt_erpt(hdl, data, tbl->tgt_class, B_FALSE);
+	}
+
+	return (err);
+}
+
+/*
+ * Prepare an ereport for a non-bridge PCI-X function error bit.  Returns
+ * nonzero (skip) when there is no PCI-X status or the device is a bridge;
+ * bridge errors are handled by the pcix_bdg tables instead.
+ */
+static int
+fab_prep_pcix_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = 0;
+
+	/* Only send if this is not a bridge */
+	if (!data->pcix_status || data->pcix_bdg_sec_stat)
+		return (1);
+
+	err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	/*
+	 * NOTE(review): PCIX_COMMAND is added as uint8 here but as uint16 in
+	 * the ECC ereport paths below -- confirm consumers accept both widths.
+	 */
+	(void) nvlist_add_uint8(erpt, PCIX_COMMAND, data->pcix_command);
+	(void) nvlist_add_uint32(erpt, PCIX_STATUS, data->pcix_status);
+
+	return (err);
+}
+
+/*
+ * Build and post PCI-X ECC ereports from the function-level (index 0) ECC
+ * status/control registers: one ereport for the faulting phase (if any) and
+ * one for a secondary CE/UE indication.  Errors posting are only logged.
+ */
+static void
+fab_send_pcix_ecc_erpt(fmd_hdl_t *hdl, fab_data_t *data)
+{
+	nvlist_t *erpt;
+	/* Phase field sits above the low status bits; shift it down. */
+	int ecc_phase = (data->pcix_ecc_status_0 & PCI_PCIX_ECC_PHASE) >> 0x4;
+	int ecc_corr = data->pcix_ecc_status_0 & PCI_PCIX_ECC_CORR;
+	int sec_ue = data->pcix_ecc_status_0 & PCI_PCIX_ECC_S_UE;
+	int sec_ce = data->pcix_ecc_status_0 & PCI_PCIX_ECC_S_CE;
+	/* Pack control (high 16) and status (low 16) into one payload word. */
+	uint32_t ctlstat = (data->pcix_ecc_control_0 << 16) |
+	    data->pcix_ecc_status_0;
+
+	/*
+	 * Pick the ereport class for the phase.  NOTE(review): assumes the
+	 * phase field only takes the enumerated values; an unlisted nonzero
+	 * phase would reuse a stale fab_buf below -- confirm field range.
+	 */
+	switch (ecc_phase) {
+	case PCI_PCIX_ECC_PHASE_NOERR:
+		break;
+	case PCI_PCIX_ECC_PHASE_FADDR:
+	case PCI_PCIX_ECC_PHASE_SADDR:
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s", PCIX_ERROR_SUBCLASS,
+		    ecc_corr ? PCIX_ECC_CE_ADDR : PCIX_ECC_UE_ADDR);
+		break;
+	case PCI_PCIX_ECC_PHASE_ATTR:
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s", PCIX_ERROR_SUBCLASS,
+		    ecc_corr ? PCIX_ECC_CE_ATTR : PCIX_ECC_UE_ATTR);
+		break;
+	case PCI_PCIX_ECC_PHASE_DATA32:
+	case PCI_PCIX_ECC_PHASE_DATA64:
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s", PCIX_ERROR_SUBCLASS,
+		    ecc_corr ? PCIX_ECC_CE_DATA : PCIX_ECC_UE_DATA);
+		break;
+	}
+
+	if (ecc_phase) {
+		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
+			goto done;
+		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+		(void) nvlist_add_uint16(erpt, PCIX_COMMAND,
+		    data->pcix_command);
+		(void) nvlist_add_uint32(erpt, PCIX_STATUS, data->pcix_status);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
+		    data->pcix_ecc_attr_0);
+		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
+		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
+		if (fmd_xprt_error(hdl, fab_fmd_xprt))
+			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
+	}
+
+	/* Secondary CE takes precedence over secondary UE in the class name. */
+	if (sec_ce || sec_ue) {
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s", PCIX_ERROR_SUBCLASS,
+		    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
+		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
+			goto done;
+		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+		(void) nvlist_add_uint16(erpt, PCIX_COMMAND,
+		    data->pcix_command);
+		(void) nvlist_add_uint32(erpt, PCIX_STATUS, data->pcix_status);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
+		    data->pcix_ecc_attr_0);
+		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
+		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
+		if (fmd_xprt_error(hdl, fab_fmd_xprt))
+			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
+	}
+
+	return;
+done:
+	fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
+}
+
+/*
+ * Prepare an ereport for a PCI-X bridge secondary-bus error bit.  The class
+ * gets the secondary-error subclass prefix; payload carries the bridge
+ * secondary status and bridge status registers.
+ */
+static int
+fab_prep_pcix_bdg_sec_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s%s",
+	    PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
+	    data->pcix_bdg_sec_stat);
+	(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT, data->pcix_bdg_stat);
+
+	return (err);
+}
+
+/*
+ * Prepare an ereport for a PCI-X bridge (primary side) error bit.  Same
+ * payload as the secondary variant above but without the secondary-error
+ * subclass in the class string.
+ */
+static int
+fab_prep_pcix_bdg_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
+	    data->pcix_bdg_sec_stat);
+	(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT, data->pcix_bdg_stat);
+
+	return (err);
+}
+
+/*
+ * Bridge-side twin of fab_send_pcix_ecc_erpt(): builds and posts PCI-X ECC
+ * ereports from the secondary (index 1) ECC status/control registers, with
+ * the secondary-error subclass in the class string and bridge status in the
+ * payload.
+ */
+static void
+fab_send_pcix_bdg_ecc_erpt(fmd_hdl_t *hdl, fab_data_t *data)
+{
+	nvlist_t *erpt;
+	int ecc_phase = (data->pcix_ecc_status_1 & PCI_PCIX_ECC_PHASE) >> 0x4;
+	int ecc_corr = data->pcix_ecc_status_1 & PCI_PCIX_ECC_CORR;
+	int sec_ue = data->pcix_ecc_status_1 & PCI_PCIX_ECC_S_UE;
+	int sec_ce = data->pcix_ecc_status_1 & PCI_PCIX_ECC_S_CE;
+	/* Pack control (high 16) and status (low 16) into one payload word. */
+	uint32_t ctlstat = (data->pcix_ecc_control_1 << 16) |
+	    data->pcix_ecc_status_1;
+
+	switch (ecc_phase) {
+	case PCI_PCIX_ECC_PHASE_NOERR:
+		break;
+	case PCI_PCIX_ECC_PHASE_FADDR:
+	case PCI_PCIX_ECC_PHASE_SADDR:
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
+		    ecc_corr ? PCIX_ECC_CE_ADDR : PCIX_ECC_UE_ADDR);
+		break;
+	case PCI_PCIX_ECC_PHASE_ATTR:
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
+		    ecc_corr ? PCIX_ECC_CE_ATTR : PCIX_ECC_UE_ATTR);
+		break;
+	case PCI_PCIX_ECC_PHASE_DATA32:
+	case PCI_PCIX_ECC_PHASE_DATA64:
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
+		    ecc_corr ? PCIX_ECC_CE_DATA : PCIX_ECC_UE_DATA);
+		break;
+	}
+	if (ecc_phase) {
+		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
+			goto done;
+		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+		(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
+		    data->pcix_bdg_sec_stat);
+		(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT,
+		    data->pcix_bdg_stat);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
+		    data->pcix_ecc_attr_1);
+		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
+		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
+		if (fmd_xprt_error(hdl, fab_fmd_xprt))
+			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
+	}
+
+	/* Secondary CE takes precedence over secondary UE in the class name. */
+	if (sec_ce || sec_ue) {
+		(void) snprintf(fab_buf, FM_MAX_CLASS,
+		    "%s.%s%s", PCIX_ERROR_SUBCLASS, PCIX_SEC_ERROR_SUBCLASS,
+		    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
+		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
+			goto done;
+		(void) fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+		(void) nvlist_add_uint16(erpt, PCIX_SEC_STATUS,
+		    data->pcix_bdg_sec_stat);
+		(void) nvlist_add_uint32(erpt, PCIX_BDG_STAT,
+		    data->pcix_bdg_stat);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_CTLSTAT, ctlstat);
+		(void) nvlist_add_uint32(erpt, PCIX_ECC_ATTR,
+		    data->pcix_ecc_attr_1);
+		fmd_hdl_debug(hdl, "Sending ecc ereport: %s\n", fab_buf);
+		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
+		if (fmd_xprt_error(hdl, fab_fmd_xprt))
+			fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
+	}
+	return;
+done:
+	fmd_hdl_debug(hdl, "Failed to send ECC ereport\n");
+}
+
+/*
+ * Prepare an ereport for a PCIe non-advisory (no-AER) device error bit.
+ * Returns nonzero (skip) for plain PCI devices, Root Ports, and any device
+ * that does have an AER capability -- those are covered by richer tables.
+ */
+static int
+fab_prep_pcie_nadv_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	int err = 0;
+
+	/* Don't send this for PCI device, Root Ports, or PCIe with AER */
+	if ((data->dev_type == PCIE_PCIECAP_DEV_TYPE_PCI_DEV) ||
+	    (data->dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) ||
+	    data->aer_off)
+		return (1);
+
+	err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIEX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint16(erpt, PCIEX_DEVSTS_REG, data->pcie_err_status);
+
+	return (err);
+}
+
+/*
+ * Prepare a Root Complex ereport from the root port error status.  FE/NFE
+ * message ereports are suppressed unless the first logged UC error matches
+ * the message severity; returns -1 to suppress, otherwise the basic-ereport
+ * result.
+ */
+static int
+fab_prep_pcie_rc_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	uint32_t status = data->pcie_rp_err_status;
+	int err = 0;
+	int isFE = 0, isNFE = 0;
+
+	fmd_hdl_debug(hdl, "XLATE RP Error Class %s", class);
+
+	if (!data->aer_off)
+		return (-1);
+
+	/*
+	 * Only send a FE Msg if the 1st UE error is FE.  (The "else" below
+	 * intentionally binds to the inner "if".)
+	 */
+	if (STRCMP(class, PCIEX_RC_FE_MSG))
+		if (!(status & PCIE_AER_RE_STS_FIRST_UC_FATAL))
+			return (-1);
+		else
+			isFE = 1;
+
+	/* Only send a NFE Msg is the 1st UE error is NFE */
+	if (STRCMP(class, PCIEX_RC_NFE_MSG))
+		if (status & PCIE_AER_RE_STS_FIRST_UC_FATAL)
+			return (-1);
+		else
+			isNFE = 1;
+
+	fmd_hdl_debug(hdl, "XLATE RP Error");
+
+	err |= fab_prep_basic_erpt(hdl, data->nvl, erpt, B_FALSE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIEX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	(void) nvlist_add_uint32(erpt, PCIEX_ROOT_ERRSTS_REG, status);
+	/* Attach the UE or CE source id when the hardware captured one. */
+	if ((isFE || isNFE) && data->pcie_rp_ue_src_id) {
+		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
+		    data->pcie_rp_ue_src_id);
+		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
+	}
+	if (STRCMP(class, PCIEX_RC_CE_MSG) && data->pcie_rp_ce_src_id) {
+		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID,
+		    data->pcie_rp_ce_src_id);
+		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
+	}
+
+	return (err);
+}
+
+/*
+ * Synthesize a Root Complex ereport on behalf of a non-compliant root port
+ * (fab_xlate_fake_rp set).  The RC error status is reconstructed from the
+ * device's own FE/NFE/CE detection bits; the device's BDF is used as the
+ * source id when a single severity was seen.  Returns -1 to skip.
+ */
+static int
+fab_prep_pcie_fake_rc_erpt(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    fab_erpt_tbl_t *tbl)
+{
+	const char *class = tbl->err_class;
+	uint32_t rc_err_sts = 0;
+	int err = 0;
+
+	/*
+	 * Don't send this for PCI device or Root Ports.  Only send it on
+	 * systems with non-compliant RPs.
+	 */
+	if ((data->dev_type == PCIE_PCIECAP_DEV_TYPE_PCI_DEV) ||
+	    (data->dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) ||
+	    (!fab_xlate_fake_rp))
+		return (-1);
+
+	err = fab_prep_basic_erpt(hdl, data->nvl, erpt, B_TRUE);
+
+	/* Generate an ereport for this error bit. */
+	(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+	    PCIEX_ERROR_SUBCLASS, class);
+	(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+	/* Send PCIe RC Ereports */
+	if (data->pcie_err_status & PCIE_DEVSTS_CE_DETECTED) {
+		rc_err_sts |= PCIE_AER_RE_STS_CE_RCVD;
+	}
+
+	/* NFE/FE src id takes precedence over CE src id */
+	if (data->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
+		rc_err_sts |= PCIE_AER_RE_STS_NFE_MSGS_RCVD;
+		rc_err_sts |= PCIE_AER_RE_STS_FE_NFE_RCVD;
+	}
+	if (data->pcie_err_status & PCIE_DEVSTS_FE_DETECTED) {
+		rc_err_sts |= PCIE_AER_RE_STS_FE_MSGS_RCVD;
+		rc_err_sts |= PCIE_AER_RE_STS_FE_NFE_RCVD;
+	}
+	if ((data->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) &&
+	    (data->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)) {
+		rc_err_sts |= PCIE_AER_RE_STS_FIRST_UC_FATAL;
+		rc_err_sts |= PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
+	}
+
+	(void) nvlist_add_uint32(erpt, PCIEX_ROOT_ERRSTS_REG, rc_err_sts);
+
+	/* Multiple FE+NFE means the single BDF can't identify the source. */
+	if (!(rc_err_sts & PCIE_AER_RE_STS_MUL_FE_NFE_RCVD)) {
+		(void) nvlist_add_uint16(erpt, PCIEX_SRC_ID, data->bdf);
+		(void) nvlist_add_boolean_value(erpt, PCIEX_SRC_VALID, B_TRUE);
+	}
+
+	return (err);
+}
+
+/*
+ * Walk the master error table and emit an ereport for every error bit set in
+ * "data", then emit the PCI-X ECC ereports which are not table-driven.
+ */
+void
+fab_xlate_pcie_erpts(fmd_hdl_t *hdl, fab_data_t *data)
+{
+	fab_err_tbl_t *tbl;
+
+	fmd_hdl_debug(hdl, "Sending Ereports Now");
+
+	/* Go through the error logs and send the relevant reports */
+	for (tbl = fab_master_err_tbl; tbl->erpt_tbl; tbl++) {
+		fab_send_erpt(hdl, data, tbl);
+	}
+
+	/* Send PCI-X ECC Ereports */
+	fab_send_pcix_ecc_erpt(hdl, data);
+	fab_send_pcix_bdg_ecc_erpt(hdl, data);
+}
+
+/*
+ * Entry point for an incoming fabric ereport: decode its register payload
+ * into a fab_data_t and translate that into individual ereports.
+ */
+void
+fab_xlate_fabric_erpts(fmd_hdl_t *hdl, nvlist_t *nvl, const char *class)
+{
+	fab_data_t data = {0};
+
+	fmd_hdl_debug(hdl, "fabric ereport received: %s\n", class);
+
+	fab_pci_fabric_to_data(hdl, nvl, &data);
+	fab_xlate_pcie_erpts(hdl, &data);
+}
+
+/*
+ * Populate master-table slot "n" with the fab_<err>_erpt_tbl translation
+ * table, the offset/size of its status register in fab_data_t, and the
+ * matching fab_prep_<err>_erpt preparation routine.
+ */
+#define	SET_TBL(n, err, reg, sz) \
+	fab_master_err_tbl[n].erpt_tbl = fab_ ## err ## _erpt_tbl; \
+	fab_master_err_tbl[n].reg_offset = offsetof(fab_data_t, reg); \
+	fab_master_err_tbl[n].reg_size = sz; \
+	fab_master_err_tbl[n].fab_prep = fab_prep_ ## err ## _erpt;
+
+/*
+ * Allocate and fill the master error table.  13 slots are calloc'ed but only
+ * 0-11 are filled; the zeroed 13th entry (erpt_tbl == NULL) terminates the
+ * scan in fab_xlate_pcie_erpts().
+ *
+ * NOTE(review): the calloc return is not checked -- a failure would fault in
+ * SET_TBL; confirm whether the module framework guarantees allocation here.
+ */
+void
+fab_setup_master_table()
+{
+	/* Setup the master error table */
+	fab_master_err_tbl = (fab_err_tbl_t *)calloc(13,
+	    sizeof (fab_err_tbl_t));
+
+	SET_TBL(0, pci,			pci_err_status,	    16);
+	SET_TBL(1, pci_bdg,		pci_bdg_sec_stat,   16);
+	SET_TBL(2, pci_bdg_ctl,		pci_bdg_ctrl,	    16);
+	SET_TBL(3, pcie_ce,		pcie_ce_status,	    32);
+	SET_TBL(4, pcie_ue,		pcie_ue_status,	    32);
+	SET_TBL(5, pcie_sue,		pcie_sue_status,    32);
+	SET_TBL(6, pcix,		pcix_status,	    32);
+	SET_TBL(7, pcix_bdg_sec,	pcix_bdg_sec_stat,  16);
+	SET_TBL(8, pcix_bdg,		pcix_bdg_stat,	    32);
+	SET_TBL(9, pcie_nadv,		pcie_err_status,    16);
+	SET_TBL(10, pcie_rc,		pcie_rp_err_status, 32);
+	SET_TBL(11, pcie_fake_rc,	pcie_err_status,    16);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/fx_fire.c	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,396 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <strings.h>
+#include <sys/fm/io/sun4_fire.h>
+
+#include "fabric-xlate.h"
+
+/* One row of a Fire-ereport-class -> generic PCI/PCIe register translation. */
+typedef struct fab_fire_tbl {
+	const char	*err_class;
+	uint32_t	fire_bit;	/* Fire error bit */
+	uint16_t	pci_err_sts;	/* Equivalent PCI Error Status */
+	uint16_t	pci_bdg_sts;	/* Equivalent PCI Bridge Status */
+} fab_fire_tbl_t;
+
+/*
+ * Translation tables for converting fire error bits into "pci" ereports.
+ * <Fire Bit>
+ * <pci ereport Class>
+ * <pci error status reg>
+ * <pci bridge status reg>
+ * <pci target class>
+ */
+#define	FAB_FIRE_PEC_BIT(fb) "ereport.io." PCIEX_FIRE "." FIRE_PEC_ ## fb
+#define	FAB_FIRE_DMC_BIT(fb) "ereport.io." PCIEX_FIRE "." FIRE_DMC_ ## fb
+#define	FAB_N2_DMU_BIT(fb) "ereport.io.n2.dmu." fb
+#define	FAB_OB_PEC_BIT(fb) "ereport.io." PCIEX_OBERON "." FIRE_PEC_ ## fb
+
+#define	FAB_FIRE_UE(fb, bit, sts, bdg) \
+	FAB_FIRE_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, sts, bdg
+#define	FAB_OB_UE(fb, bit, sts, bdg) \
+	FAB_OB_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, sts, bdg
+/* Fire/Oberon PEC uncorrectable errors; scan stops at err_class == NULL. */
+static fab_fire_tbl_t fab_fire_pec_ue_tbl[] = {
+	FAB_FIRE_UE(UR,	 UR,	   PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(UC,	 UC,	   PCI_STAT_S_SYSERR,	0),
+	FAB_OB_UE(ECRC,	 ECRC,	   PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(CTO, TO,	   PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(ROF, RO,	   PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(MFP, MTLP,	   PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(PP,	 PTLP,	   PCI_STAT_S_PERROR,
+	    (PCI_STAT_S_SYSERR | PCI_STAT_PERROR)),
+	FAB_FIRE_UE(FCP, FCP,	   PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(DLP, DLP,	   PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(TE,	 TRAINING, PCI_STAT_S_SYSERR,	0),
+	FAB_FIRE_UE(CA,	 CA,	   PCI_STAT_S_TARG_AB,
+	    PCI_STAT_S_TARG_AB),
+	NULL, 0, 0, 0,
+};
+
+#define	FAB_FIRE_CE(fb, bit) \
+	FAB_FIRE_PEC_BIT(fb), PCIE_AER_CE_ ## bit, 0, 0
+/* Fire PEC correctable errors; scan stops at err_class == NULL. */
+static fab_fire_tbl_t fab_fire_pec_ce_tbl[] = {
+	FAB_FIRE_CE(RTO,	REPLAY_TO),
+	FAB_FIRE_CE(RNR,	REPLAY_ROLLOVER),
+	FAB_FIRE_CE(BDP,	BAD_DLLP),
+	FAB_FIRE_CE(BTP,	BAD_TLP),
+	FAB_FIRE_CE(RE,		RECEIVER_ERR),
+	NULL, 0, 0, 0,
+};
+
+/*
+ * WUC/RUC will need to be special cased for the target ereports, because you
+ * need to decode the tlp log.
+ */
+#define	FAB_FIRE_WUCRUC(fb) \
+	FAB_FIRE_PEC_BIT(fb), 0, 0, (PCI_STAT_R_MAST_AB | PCI_STAT_S_SYSERR)
+#define	FAB_FIRE_OE(fb, bit) \
+	FAB_FIRE_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, PCI_STAT_S_SYSERR, 0
+/*
+ * NOTE(review): FAB_OB_OE expands FAB_FIRE_PEC_BIT rather than
+ * FAB_OB_PEC_BIT (unlike FAB_OB_UE above) and is unused in this table --
+ * confirm whether an Oberon variant of these classes was intended.
+ */
+#define	FAB_OB_OE(fb, bit) \
+	FAB_FIRE_PEC_BIT(fb), PCIE_AER_UCE_ ## bit, PCI_STAT_S_SYSERR, 0
+/* Fire PEC "other" errors; scan stops at err_class == NULL. */
+static fab_fire_tbl_t fab_fire_pec_oe_tbl[] = {
+	FAB_FIRE_WUCRUC(WUC),
+	FAB_FIRE_WUCRUC(RUC),
+	FAB_FIRE_OE(ERU, DLP),
+	FAB_FIRE_OE(ERO, DLP),
+	FAB_FIRE_OE(EMP, DLP),
+	FAB_FIRE_OE(EPE, DLP),
+	NULL, 0, 0, 0,
+};
+
+#define	FAB_FIRE_DMC(fb) \
+	FAB_FIRE_DMC_BIT(fb), PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB
+#define	FAB_N2_DMU(fb) \
+	FAB_N2_DMU_BIT(fb), PCIE_AER_UCE_CA, 0, PCI_STAT_S_TARG_AB
+/* Fire DMC / N2 DMU errors; scan stops at err_class == NULL. */
+static fab_fire_tbl_t fab_fire_dmc_tbl[] = {
+	FAB_FIRE_DMC(BYP_ERR),
+	FAB_FIRE_DMC(BYP_OOR),
+	FAB_FIRE_DMC(TRN_OOR),
+	FAB_FIRE_DMC(TTE_INV),
+	FAB_FIRE_DMC(TTE_PRT),
+	FAB_N2_DMU("iotsbdesc_inv"),
+	FAB_N2_DMU("sun4v_adj_va_uf"),
+	FAB_N2_DMU("sun4v_inv_pg_sz"),
+	FAB_N2_DMU("sun4v_key_err"),
+	FAB_N2_DMU("sun4v_va_oor"),
+	NULL, 0, 0, 0
+};
+
+/*
+ * Seed "data" for a Fire ereport: attach the nvlist, force the device type
+ * to Root Complex, and install a fixed UE severity mask (DLP/SD/FCP/RO/MTLP
+ * are treated as fatal by the translation routines below).
+ */
+/* ARGSUSED */
+static void
+fab_fire_to_data(fmd_hdl_t *hdl, nvlist_t *nvl, fab_data_t *data)
+{
+	data->nvl = nvl;
+
+	/* Always Root Complex */
+	data->dev_type = PCIE_PCIECAP_DEV_TYPE_ROOT;
+
+	data->pcie_ue_sev = (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |
+	    PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP);
+}
+
+/*
+ * Translate a Fire PEC correctable-error ereport into generic AER CE state.
+ * Returns 1 if "class" matched the CE table and "data" was filled in, 0 if
+ * this is not a known Fire CE class.
+ */
+static int
+fab_xlate_fire_ce(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    const char *class)
+{
+	fab_fire_tbl_t	*entry;
+	uint64_t	reg;
+
+	for (entry = fab_fire_pec_ce_tbl; entry->err_class; entry++) {
+		if (STRCMP(class, entry->err_class))
+			goto send;
+	}
+
+	return (0);
+
+send:
+	fmd_hdl_debug(hdl, "Translate Fire CE %s\n", class);
+
+	/* Fill in the device status register */
+	data->pcie_err_status = PCIE_DEVSTS_CE_DETECTED;
+
+	/* Fill in the AER CE register (fold the 64-bit TLU value to 32). */
+	if (nvlist_lookup_uint64(erpt, "tlu-cess", &reg) == 0) {
+		data->pcie_ce_status = (uint32_t)reg | (uint32_t)(reg >> 32);
+	}
+
+	return (1);
+}
+
+/*
+ * Translate a Fire PEC uncorrectable-error ereport into generic PCIe AER
+ * register state in "data".  Returns 1 if "class" matched the UE table and
+ * "data" was filled in, 0 if this is not a known Fire UE class.
+ */
+static int
+fab_xlate_fire_ue(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    const char *class)
+{
+	fab_fire_tbl_t	*entry;
+	uint64_t	reg = 0;	/* stays 0 if no TLU member is found */
+	uint32_t	temp;
+	pcie_tlp_hdr_t	*hdr;
+
+	for (entry = fab_fire_pec_ue_tbl; entry->err_class; entry++) {
+		if (STRCMP(class, entry->err_class))
+			goto send;
+	}
+
+	return (0);
+
+send:
+	fmd_hdl_debug(hdl, "Translate Fire UE %s\n", class);
+
+	/* Fill in PCI Status Register */
+	data->pci_err_status = entry->pci_err_sts;
+	data->pci_bdg_sec_stat = entry->pci_bdg_sts;
+
+	/* Fill in the device status register */
+	if (entry->fire_bit & data->pcie_ue_sev)
+		data->pcie_err_status = PCIE_DEVSTS_FE_DETECTED;
+	else
+		data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;
+
+	if (entry->fire_bit == PCIE_AER_UCE_UR)
+		data->pcie_err_status |= PCIE_DEVSTS_UR_DETECTED;
+
+	/* Fill in the AER UE register */
+	if (nvlist_lookup_uint64(erpt, "tlu-uess", &reg) == 0) {
+		data->pcie_ue_status = (uint32_t)reg | (uint32_t)(reg >> 32);
+	}
+
+	/*
+	 * Fill in the AER Control register with the bit position (log2) of
+	 * fire_bit.  "reg" was zero-initialized above, so a missing
+	 * "tlu-uess" member cannot leave it indeterminate here.
+	 */
+	if ((reg & (uint64_t)entry->fire_bit) &&
+	    nvlist_lookup_boolean(erpt, "primary")) {
+		temp = entry->fire_bit;
+		for (data->pcie_adv_ctl = (uint32_t)-1; temp;
+		    data->pcie_adv_ctl++)
+			temp = temp >> 1;
+	}
+
+	/* If CTO create target information */
+	if (entry->fire_bit == PCIE_AER_UCE_TO &&
+	    nvlist_lookup_boolean(erpt, "primary")) {
+		if (nvlist_lookup_uint64(erpt, "tlu-tueh1l", &reg) == 0) {
+			data->pcie_ue_hdr[0] = (uint32_t)(reg >> 32);
+			data->pcie_ue_hdr[1] = (uint32_t)(reg);
+		}
+		if (nvlist_lookup_uint64(erpt, "tlu-tueh2l", &reg) == 0) {
+			data->pcie_ue_hdr[2] = (uint32_t)(reg >> 32);
+			data->pcie_ue_hdr[3] = (uint32_t)(reg);
+		}
+
+		hdr = (pcie_tlp_hdr_t *)(&data->pcie_ue_hdr[0]);
+		switch (hdr->type) {
+		case PCIE_TLP_TYPE_IO:
+		case PCIE_TLP_TYPE_MEM:
+		case PCIE_TLP_TYPE_MEMLK:
+			data->pcie_ue_tgt_trans = PF_ADDR_PIO;
+			/*
+			 * NOTE(review): 64-bit format TLPs take the raw
+			 * "tlu-tueh2l" value, 32-bit ones take header word 2
+			 * -- confirm against the Fire TLU log layout.
+			 */
+			if (hdr->fmt & 0x1) {
+				data->pcie_ue_tgt_addr = reg;
+			} else {
+				data->pcie_ue_tgt_addr = data->pcie_ue_hdr[2];
+			}
+			break;
+		case PCIE_TLP_TYPE_CFG0:
+		case PCIE_TLP_TYPE_CFG1:
+			data->pcie_ue_tgt_trans = PF_ADDR_CFG;
+			data->pcie_ue_tgt_bdf = data->pcie_ue_hdr[2] >> 16;
+			break;
+		}
+	}
+
+	/* Fill in the AER Header registers */
+	if (nvlist_lookup_uint64(erpt, "tlu-rueh1l", &reg) == 0) {
+		data->pcie_ue_hdr[0] = (uint32_t)(reg >> 32);
+		data->pcie_ue_hdr[1] = (uint32_t)(reg);
+	}
+	if (nvlist_lookup_uint64(erpt, "tlu-rueh2l", &reg) == 0) {
+		data->pcie_ue_hdr[2] = (uint32_t)(reg >> 32);
+		data->pcie_ue_hdr[3] = (uint32_t)(reg);
+	}
+
+	return (1);
+}
+
+/*
+ * Translate a Fire PEC "other" error ereport (WUC/RUC and link errors) into
+ * generic state.  Entries with fire_bit == 0 (WUC/RUC) have no AER bit, so
+ * the PCI status is derived by decoding the logged TLP completion header
+ * instead.  Returns 1 on a table match, 0 otherwise.
+ */
+static int
+fab_xlate_fire_oe(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    const char *class)
+{
+	fab_fire_tbl_t	*entry;
+	uint64_t	reg;
+
+	for (entry = fab_fire_pec_oe_tbl; entry->err_class; entry++) {
+		if (STRCMP(class, entry->err_class))
+			goto send;
+	}
+
+	return (0);
+
+send:
+	fmd_hdl_debug(hdl, "Translate Fire OE %s\n", class);
+
+	/* Fill in PCI Status Register */
+	if (entry->fire_bit) {
+		data->pci_err_status = entry->pci_err_sts;
+		data->pci_bdg_sec_stat = entry->pci_bdg_sts;
+	} else {
+		if (nvlist_lookup_uint64(erpt, "tlu-roeeh1l", &reg) == 0) {
+			data->pcie_ue_hdr[0] = (uint32_t)(reg >> 32);
+			data->pcie_ue_hdr[1] = (uint32_t)(reg);
+		}
+		if (nvlist_lookup_uint64(erpt, "tlu-roeeh2l", &reg) == 0) {
+			data->pcie_ue_hdr[2] = (uint32_t)(reg >> 32);
+			data->pcie_ue_hdr[3] = (uint32_t)(reg);
+		}
+
+		/* Map a completion's UR/CA status onto bridge status bits. */
+		if (((pcie_tlp_hdr_t *)(&data->pcie_ue_hdr[0]))->type ==
+		    PCIE_TLP_TYPE_CPL) {
+			pcie_cpl_t *cpl = (pcie_cpl_t *)&data->pcie_ue_hdr[1];
+			switch (cpl->status) {
+			case PCIE_CPL_STS_UR:
+				data->pci_err_status = 0;
+				data->pci_bdg_sec_stat = PCI_STAT_R_MAST_AB |
+				    PCI_STAT_S_SYSERR;
+				break;
+			case PCIE_CPL_STS_CA:
+				data->pci_err_status = 0;
+				data->pci_bdg_sec_stat = PCI_STAT_R_TARG_AB |
+				    PCI_STAT_S_SYSERR;
+				break;
+			}
+		}
+	}
+
+	/* Fill in the device status register */
+	if (entry->fire_bit & data->pcie_ue_sev)
+		data->pcie_err_status = PCIE_DEVSTS_FE_DETECTED;
+	else
+		data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;
+
+	/* Fill in the AER UE register */
+	data->pcie_ue_status = entry->fire_bit;
+
+	return (1);
+}
+
+/*
+ * Translate a Fire DMC / N2 DMU ereport into generic state.  Only primary
+ * errors are translated; all table entries map to a Completer Abort.  The
+ * synthetic AER header is rebuilt from the MMU translation fault status
+ * (mmu-tfsr: transaction type + requester id) and fault address (mmu-tfar).
+ * Returns 1 on a table match, 0 otherwise.
+ */
+static int
+fab_xlate_fire_dmc(fmd_hdl_t *hdl, fab_data_t *data, nvlist_t *erpt,
+    const char *class)
+{
+	fab_fire_tbl_t	*entry;
+	uint64_t	reg;
+	uint32_t	temp;
+
+	for (entry = fab_fire_dmc_tbl; entry->err_class; entry++) {
+		fmd_hdl_debug(hdl, "Matching %s\n", entry->err_class);
+		if (STRCMP(class, entry->err_class) &&
+		    nvlist_lookup_boolean(erpt, "primary"))
+			goto send;
+	}
+
+	return (0);
+
+send:
+	fmd_hdl_debug(hdl, "Translate Fire DMC %s\n", class);
+
+	/* Fill in PCI Status Register */
+	data->pci_err_status = entry->pci_err_sts;
+	data->pci_bdg_sec_stat = entry->pci_bdg_sts;
+
+	/* Fill in the device status register */
+	data->pcie_err_status = PCIE_DEVSTS_NFE_DETECTED;
+
+	/* Fill in the AER UE register */
+	data->pcie_ue_status = entry->fire_bit;
+
+	/* Fill in the AER Control register (bit position of fire_bit). */
+	temp = entry->fire_bit;
+	for (data->pcie_adv_ctl = (uint32_t)-1; temp; data->pcie_adv_ctl++)
+		temp = temp >> 1;
+
+	/* Fill in the AER Header registers */
+	if (nvlist_lookup_uint64(erpt, "mmu-tfsr", &reg) == 0) {
+		fmd_hdl_debug(hdl, "tfsr 0x%llx\n", reg);
+		/* Get the trans type */
+		temp = (reg & 0x3F0000) >> 16;
+		data->pcie_ue_hdr[0] = (uint32_t)(temp << 24);
+		data->pcie_ue_tgt_trans = PF_ADDR_DMA;
+		/* Get the req id */
+		temp = (reg & 0xFFFF);
+		data->pcie_ue_hdr[1] = (uint32_t)(temp << 16);
+		data->pcie_ue_tgt_bdf = temp;
+	}
+
+	if (nvlist_lookup_uint64(erpt, "mmu-tfar", &reg) == 0) {
+		fmd_hdl_debug(hdl, "tfar 0x%llx\n", reg);
+		/*
+		 * Get the address.  NOTE(review): hdr[2] is 32 bits, so the
+		 * upper half of a 64-bit fault address is dropped from the
+		 * synthetic header (tgt_addr keeps the full value) -- confirm.
+		 */
+		data->pcie_ue_hdr[2] = reg;
+		data->pcie_ue_hdr[3] = 0;
+		data->pcie_ue_tgt_addr = reg;
+	}
+
+	fmd_hdl_debug(hdl, "HEADER 0 0x%x\n", data->pcie_ue_hdr[0]);
+	fmd_hdl_debug(hdl, "HEADER 1 0x%x\n", data->pcie_ue_hdr[1]);
+	fmd_hdl_debug(hdl, "HEADER 2 0x%x\n", data->pcie_ue_hdr[2]);
+	fmd_hdl_debug(hdl, "HEADER 3 0x%x\n", data->pcie_ue_hdr[3]);
+
+	return (1);
+}
+
+/*
+ * Entry point for an incoming Fire RC ereport: seed the fab_data_t, try the
+ * PEC translators in CE -> UE -> OE order (first match wins), or the DMC/DMU
+ * translator for MMU classes, then emit the resulting generic ereports.
+ */
+void
+fab_xlate_fire_erpts(fmd_hdl_t *hdl, nvlist_t *nvl, const char *class)
+{
+	fab_data_t data = {0};
+
+	fmd_hdl_debug(hdl, "Fire RC ereport received: %s\n", class);
+
+	fab_fire_to_data(hdl, nvl, &data);
+
+	if (fmd_nvl_class_match(hdl, nvl, "ereport.io.fire.pec.*")) {
+		if (! fab_xlate_fire_ce(hdl, &data, nvl, class) &&
+		    ! fab_xlate_fire_ue(hdl, &data, nvl, class))
+			(void) fab_xlate_fire_oe(hdl, &data, nvl, class);
+	} else if (fmd_nvl_class_match(hdl, nvl, "ereport.io.fire.dmc.*") ||
+	    fmd_nvl_class_match(hdl, nvl, "ereport.io.n2.dmu.*"))
+		(void) fab_xlate_fire_dmc(hdl, &data, nvl, class);
+
+	fab_xlate_pcie_erpts(hdl, &data);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/fm/modules/common/fabric-xlate/fx_subr.c	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,837 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <strings.h>
+#include <fm/topo_hc.h>
+#include <sys/fm/util.h>
+#include <libxml/xpath.h>
+#include <libxml/parser.h>
+#include <libxml/xpathInternals.h>
+#include <libxml/tree.h>
+
+#include "fabric-xlate.h"
+
+#define	HAS_PROP(node, name) xmlHasProp(node, (const xmlChar *)name)
+#define	GET_PROP(node, name) ((char *)xmlGetProp(node, (const xmlChar *)name))
+#define	FREE_PROP(prop) xmlFree((xmlChar *)prop)
+
+extern xmlXPathContextPtr fab_xpathCtx;
+
+/* ARGSUSED */
+int
+fab_prep_basic_erpt(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *erpt,
+    boolean_t isRC)
+{
+	uint64_t	*now;
+	uint64_t	ena;
+	uint_t		nelem;
+	nvlist_t	*detector, *new_detector;
+	char		rcpath[255];
+	int		err = 0;
+
+	/* Grab the tod, ena and detector(FMRI) */
+	err |= nvlist_lookup_uint64_array(nvl, "__tod", &now, &nelem);
+	err |= nvlist_lookup_uint64(nvl, "ena", &ena);
+	err |= nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector);
+	if (err)
+		return (err);
+
+	/* Make a copy of the detector */
+	err = nvlist_dup(detector, &new_detector, NV_UNIQUE_NAME);
+	if (err)
+		return (err);
+
+	/* Copy the tod and ena to erpt */
+	(void) nvlist_add_uint64(erpt, FM_EREPORT_ENA, ena);
+	(void) nvlist_add_uint64_array(erpt, "__tod", now, nelem);
+
+	/*
+	 * Create the correct ROOT FMRI from PCIe leaf fabric ereports.	 Used
+	 * only by fab_prep_fake_rc_erpt.  See the fab_pciex_fake_rc_erpt_tbl
+	 * comments for more information.
+	 */
+	if (isRC && fab_get_rcpath(hdl, nvl, rcpath)) {
+		/* Create the correct PCIe RC new_detector aka FMRI */
+		(void) nvlist_remove(new_detector, FM_FMRI_DEV_PATH,
+		    DATA_TYPE_STRING);
+		(void) nvlist_add_string(new_detector, FM_FMRI_DEV_PATH,
+		    rcpath);
+	}
+
+	/* Copy the FMRI to erpt */
+	(void) nvlist_add_nvlist(erpt, FM_EREPORT_DETECTOR, new_detector);
+
+	nvlist_free(new_detector);
+	return (err);
+}
+
+void
+fab_send_tgt_erpt(fmd_hdl_t *hdl, fab_data_t *data, const char *class,
+    boolean_t isPrimary)
+{
+	nvlist_t	*nvl = data->nvl;
+	nvlist_t	*erpt;
+	char		*fmri = NULL;
+	uint32_t	tgt_trans;
+	uint64_t	tgt_addr;
+	uint16_t	tgt_bdf;
+
+	if (isPrimary) {
+		tgt_trans = data->pcie_ue_tgt_trans;
+		tgt_addr = data->pcie_ue_tgt_addr;
+		tgt_bdf = data->pcie_ue_tgt_bdf;
+	} else {
+		tgt_trans = data->pcie_sue_tgt_trans;
+		tgt_addr = data->pcie_sue_tgt_addr;
+		tgt_bdf = data->pcie_sue_tgt_bdf;
+	}
+
+	fmd_hdl_debug(hdl, "Sending Target Ereport: "
+	    "type 0x%x addr 0x%llx fltbdf 0x%x\n",
+	    tgt_trans, tgt_addr, tgt_bdf);
+
+	if (!tgt_trans)
+		return;
+
+	if ((tgt_trans == PF_ADDR_PIO) && tgt_addr)
+		fmri = fab_find_addr(hdl, nvl, tgt_addr);
+	else if ((tgt_trans == PF_ADDR_CFG || (tgt_trans == PF_ADDR_DMA)) &&
+	    tgt_bdf)
+		fmri = fab_find_bdf(hdl, nvl, tgt_bdf);
+
+	if (fmri) {
+		uint64_t	*now;
+		uint64_t	ena;
+		uint_t		nelem;
+		nvlist_t	*detector;
+		int		err = 0;
+
+		/* Allocate space for new erpt */
+		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
+			goto done;
+
+		/* Generate the target ereport class */
+		(void) snprintf(fab_buf, FM_MAX_CLASS, "ereport.io.%s.%s",
+		    PCI_ERROR_SUBCLASS, class);
+		(void) nvlist_add_string(erpt, FM_CLASS, fab_buf);
+
+		/* Grab the tod, ena and detector(FMRI) */
+		err |= nvlist_lookup_uint64_array(nvl, "__tod", &now, &nelem);
+		err |= nvlist_lookup_uint64(nvl, "ena", &ena);
+
+		/* Copy the tod and ena to erpt */
+		(void) nvlist_add_uint64(erpt, FM_EREPORT_ENA, ena);
+		(void) nvlist_add_uint64_array(erpt, "__tod", now, nelem);
+
+		/* Create the correct FMRI */
+		if (nvlist_alloc(&detector, NV_UNIQUE_NAME, 0) != 0) {
+			nvlist_free(erpt);
+			goto done;
+		}
+		(void) nvlist_add_uint8(detector, FM_VERSION,
+		    FM_DEV_SCHEME_VERSION);
+		(void) nvlist_add_string(detector, FM_FMRI_SCHEME,
+		    FM_FMRI_SCHEME_DEV);
+		(void) nvlist_add_string(detector, FM_FMRI_DEV_PATH, fmri);
+		(void) nvlist_add_nvlist(erpt, FM_EREPORT_DETECTOR, detector);
+		nvlist_free(detector);
+
+		/* Add the address payload */
+		(void) nvlist_add_uint64(erpt, PCI_PA, tgt_addr);
+
+		fmd_hdl_debug(hdl, "Sending target ereport: %s 0x%x\n",
+		    fab_buf, tgt_addr);
+		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
+		if (fmd_xprt_error(hdl, fab_fmd_xprt))
+			goto done;
+		fmd_hdl_strfree(hdl, fmri);
+	} else {
+		fmd_hdl_debug(hdl,
+		    "Cannot find Target FMRI addr:0x%llx bdf 0x%x\n",
+		    tgt_addr, tgt_bdf);
+	}
+
+	return;
+done:
+	if (fmri)
+		fmd_hdl_strfree(hdl, fmri);
+	fmd_hdl_debug(hdl, "Failed to send Target PCI ereport\n");
+}
+
+void
+fab_send_erpt(fmd_hdl_t *hdl, fab_data_t *data, fab_err_tbl_t *tbl)
+{
+	fab_erpt_tbl_t	*erpt_tbl, *entry;
+	nvlist_t	*erpt;
+	uint32_t	reg;
+
+	erpt_tbl = tbl->erpt_tbl;
+	if (tbl->reg_size == 16) {
+		reg = (uint32_t)*((uint16_t *)
+		    ((uintptr_t)data + tbl->reg_offset));
+	} else {
+		reg = *((uint32_t *)((uintptr_t)data + tbl->reg_offset));
+	}
+
+	for (entry = erpt_tbl; entry->err_class; entry++) {
+		if (!(reg & entry->reg_bit))
+			continue;
+
+		if (nvlist_alloc(&erpt, NV_UNIQUE_NAME, 0) != 0)
+			goto done;
+		if (tbl->fab_prep(hdl, data, erpt, entry) != 0) {
+			fmd_hdl_debug(hdl, "Prepping ereport failed: "
+			    "class = %s\n", entry->err_class);
+			nvlist_free(erpt);
+			continue;
+		}
+
+		fmd_hdl_debug(hdl, "Sending ereport: %s 0x%x\n", fab_buf, reg);
+		fmd_xprt_post(hdl, fab_fmd_xprt, erpt, 0);
+		if (fmd_xprt_error(hdl, fab_fmd_xprt)) {
+			fmd_hdl_debug(hdl, "Failed to send PCI ereport\n");
+			return;
+		}
+	}
+
+	return;
+done:
+	fmd_hdl_debug(hdl, "Failed to send PCI ereport\n");
+}
+
+char *
+fab_xpath_query(fmd_hdl_t *hdl, const char *query)
+{
+	xmlXPathObjectPtr xpathObj;
+	xmlNodeSetPtr nodes;
+	char *temp, *res;
+
+	fmd_hdl_debug(hdl, "xpathObj query %s\n", query);
+
+	xpathObj = xmlXPathEvalExpression((const xmlChar *)query,
+	    fab_xpathCtx);
+
+	if (xpathObj == NULL)
+		return (NULL);
+
+	fmd_hdl_debug(hdl, "xpathObj 0x%p type %d\n", xpathObj,
+	    xpathObj->type);
+	nodes = xpathObj->nodesetval;
+	/* an empty result may have a zero-length (or NULL) nodeTab */
+	if (nodes != NULL && nodes->nodeNr > 0) {
+		temp = (char *)xmlNodeGetContent(nodes->nodeTab[0]);
+		fmd_hdl_debug(hdl, "query result: %s\n", temp);
+		res = fmd_hdl_strdup(hdl, temp, FMD_SLEEP);
+		xmlFree(temp);
+		xmlXPathFreeObject(xpathObj);
+		return (res);
+	}
+	xmlXPathFreeObject(xpathObj);
+	return (NULL);
+}
+
+#define	FAB_HC2DEV_QUERY_SIZE_MIN 160
+#define	FAB_HC2DEV_QUERY_SIZE(sz) \
+	((sz + FAB_HC2DEV_QUERY_SIZE_MIN) * sizeof (char))
+
+/*
+ * hc_path is in form of "/motherboard=0/hostbridge=0/pciexrc=0"
+ */
+boolean_t
+fab_hc2dev(fmd_hdl_t *hdl, const char *hc_path, char **dev_path)
+{
+	char *query;
+	uint_t len = FAB_HC2DEV_QUERY_SIZE_MIN + strlen(hc_path);
+
+	query = fmd_hdl_alloc(hdl, len, FMD_SLEEP);
+	(void) snprintf(query, len, "//propval[@name='resource' and contains("
+	    "substring(@value, string-length(@value) - %d + 1), '%s')]"
+	    "/parent::*/following-sibling::*/propval[@name='dev']/@value",
+	    strlen(hc_path) + 1, hc_path);
+
+	*dev_path = fab_xpath_query(hdl, query);
+
+	fmd_hdl_free(hdl, query, len);
+
+	return (*dev_path != NULL);
+}
+
+static boolean_t
+fab_hc_path(fmd_hdl_t *hdl, nvlist_t *detector, char **hcpath, size_t *lenp)
+{
+	char c, *name, *id, *buf;
+	uint_t i, size;
+	nvlist_t **hcl;
+	size_t len = 0, buf_size = 0;
+
+	if (nvlist_lookup_nvlist_array(detector, FM_FMRI_HC_LIST, &hcl,
+	    &size) != 0)
+		return (B_FALSE);
+
+	for (i = 0; i < size; i++) {
+		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME, &name) != 0)
+			return (B_FALSE);
+		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &id) != 0)
+			return (B_FALSE);
+		buf_size += snprintf(&c, 1, "/%s=%s", name, id);
+	}
+
+	buf_size++;
+	buf = fmd_hdl_alloc(hdl, buf_size, FMD_SLEEP);
+
+	for (i = 0; i < size; i++) {
+		(void) nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME, &name);
+		(void) nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &id);
+		len += snprintf(buf + len, buf_size - len, "/%s=%s", name, id);
+	}
+
+	*hcpath = buf;
+	*lenp = buf_size;
+
+	return (B_TRUE);
+}
+
+boolean_t
+fab_hc2dev_nvl(fmd_hdl_t *hdl, nvlist_t *detector, char **dev_path)
+{
+	char *hcl;
+	size_t len;
+
+	if (! fab_hc_path(hdl, detector, &hcl, &len))
+		return (B_FALSE);
+
+	(void) fab_hc2dev(hdl, hcl, dev_path);
+
+	fmd_hdl_free(hdl, hcl, len);
+
+	return (*dev_path != NULL);
+}
+
+boolean_t
+fab_get_hcpath(fmd_hdl_t *hdl, nvlist_t *nvl, char **hcpath, size_t *len)
+{
+	nvlist_t *detector;
+	char *scheme;
+
+	if (nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector) != 0 ||
+	    nvlist_lookup_string(detector, FM_FMRI_SCHEME, &scheme) != 0 ||
+	    ! STRCMP(scheme, FM_FMRI_SCHEME_HC))
+		return (B_FALSE);
+
+	return (fab_hc_path(hdl, detector, hcpath, len));
+}
+
+char *
+fab_find_rppath_by_df(fmd_hdl_t *hdl, nvlist_t *nvl, uint8_t df)
+{
+	char	query[500];
+	char	str[10];
+	char	*hcpath;
+	size_t	len;
+
+	(void) snprintf(str, sizeof (str), "%0hhx", df);
+
+	/*
+	 * get the string form of the hc detector, eg
+	 * /chassis=0/motherboard=0/hostbridge=0
+	 */
+	if (!fab_get_hcpath(hdl, nvl, &hcpath, &len))
+		return (NULL);
+
+	/*
+	 * Explanation of the XSL XPATH Query
+	 * Line 1: Look at all nodes with the node name "propval"
+	 * Line 2: See if the "BDF" of the node matches DF
+	 * Line 3-4: See if the the node is pciexrc
+	 * Line 5-6: See if the "ASRU" contains root complex
+	 * Line 7-8: Go up one level and get prop value of io/dev
+	 */
+	(void) snprintf(query, sizeof (query), "//propval["
+	    "@name='BDF' and contains(substring(@value, "
+	    "string-length(@value) - 1), '%s')]"
+	    "/parent::*/parent::*/propgroup[@name='pci']/propval"
+	    "[@name='extended-capabilities' and @value='%s']"
+	    "/parent::*/parent::*/propgroup[@name='protocol']"
+	    "/propval[@name='resource' and contains(@value, '%s')]"
+	    "/parent::*/parent::*/propgroup[@name='io']"
+	    "/propval[@name='dev']/@value", str, PCIEX_ROOT, hcpath);
+
+	fmd_hdl_free(hdl, hcpath, len);
+
+	return (fab_xpath_query(hdl, query));
+}
+
+char *
+fab_find_rppath_by_devbdf(fmd_hdl_t *hdl, nvlist_t *nvl, pcie_req_id_t bdf)
+{
+	xmlXPathObjectPtr xpathObj = NULL;
+	xmlNodeSetPtr nodes;
+	xmlNodePtr devNode;
+	char	*retval = NULL, *temp;
+	char	query[500];
+	int	i, size, bus = 0, dev = 0, fn = 0;
+	char	*hcpath;
+	size_t	len;
+
+	if (bdf != (uint16_t)-1) {
+		bus = (bdf & PCIE_REQ_ID_BUS_MASK) >> PCIE_REQ_ID_BUS_SHIFT;
+		dev = (bdf & PCIE_REQ_ID_DEV_MASK) >> PCIE_REQ_ID_DEV_SHIFT;
+		fn = (bdf & PCIE_REQ_ID_FUNC_MASK) >> PCIE_REQ_ID_FUNC_SHIFT;
+	}
+
+	/*
+	 * get the string form of the hc detector, eg
+	 * /chassis=0/motherboard=0/hostbridge=0
+	 */
+	if (!fab_get_hcpath(hdl, nvl, &hcpath, &len))
+		goto fail;
+
+	/*
+	 * Explanation of the XSL XPATH Query
+	 * Line 1: Look at all nodes with the node name "propval"
+	 * Line 2-3: See if the "value" of the node ends with correct PCIEx BDF
+	 * Line 4-5: See if the "value" of the node ends with correct PCI BDF
+	 * Line 6: Go up one level to the parent of the current node
+	 * Line 7: See if child node contains "ASRU" with the same PCIe Root
+	 * Line 8: Go up see all the ancestors
+	 */
+	(void) snprintf(query, sizeof (query), "//propval["
+	    "contains(substring(@value, string-length(@value) - 34), "
+	    "'pciexbus=%d/pciexdev=%d/pciexfn=%d') or "
+	    "contains(substring(@value, string-length(@value) - 28), "
+	    "'pcibus=%d/pcidev=%d/pcifn=%d')"
+	    "]/parent::"
+	    "*/propval[@name='resource' and contains(@value, '%s')]"
+	    "/ancestor::*",
+	    bus, dev, fn, bus, dev, fn, hcpath);
+
+	fmd_hdl_free(hdl, hcpath, len);
+
+	fmd_hdl_debug(hdl, "xpathObj query %s\n", query);
+
+	xpathObj = xmlXPathEvalExpression((const xmlChar *)query, fab_xpathCtx);
+
+	if (xpathObj == NULL)
+		goto fail;
+
+	nodes = xpathObj->nodesetval;
+	size = (nodes) ? nodes->nodeNr : 0;
+
+	fmd_hdl_debug(hdl, "xpathObj 0x%p type %d size %d\n",
+	    xpathObj, xpathObj->type, size);
+
+	for (i = 0; i < size; i++) {
+		devNode = nodes->nodeTab[i];
+		if (STRCMP(devNode->name, "range") &&
+		    HAS_PROP(devNode, "name")) {
+			char *tprop = GET_PROP(devNode, "name");
+
+			/* find "range name='pciexrc'" in ancestors */
+			if (STRCMP(tprop, PCIEX_ROOT)) {
+				/* go down to the pciexrc instance node */
+				FREE_PROP(tprop);
+				devNode = nodes->nodeTab[i+1];
+				goto found;
+			}
+			FREE_PROP(tprop);
+		}
+	}
+	goto fail;
+
+found:
+	/* Traverse down the xml tree to find the right propgroup */
+	for (devNode = devNode->children; devNode; devNode = devNode->next) {
+		if (STRCMP(devNode->name, "propgroup")) {
+			char *tprop = GET_PROP(devNode, "name");
+
+			if (STRCMP(tprop, "io")) {
+				FREE_PROP(tprop);
+				goto propgroup;
+			}
+			FREE_PROP(tprop);
+		}
+	}
+	goto fail;
+
+propgroup:
+	/* Retrieve the "dev" propval and return */
+	for (devNode = devNode->children; devNode; devNode = devNode->next) {
+		if (STRCMP(devNode->name, "propval")) {
+			char *tprop = GET_PROP(devNode, "name");
+
+			if (STRCMP(tprop, "dev")) {
+				temp = GET_PROP(devNode, "value");
+				retval = fmd_hdl_strdup(hdl, temp, FMD_SLEEP);
+				fmd_hdl_debug(hdl, "RP Path: %s\n", retval);
+				xmlFree(temp);
+			}
+			FREE_PROP(tprop);
+			xmlXPathFreeObject(xpathObj);
+
+			return (retval);
+		}
+	}
+fail:
+	if (xpathObj != NULL)
+		xmlXPathFreeObject(xpathObj);
+	return (NULL);
+}
+
+/* ARGSUSED */
+boolean_t
+fab_get_rcpath(fmd_hdl_t *hdl, nvlist_t *nvl, char *rcpath)
+{
+	nvlist_t	*detector;
+	char		*path, *scheme;
+
+	if (nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector) != 0)
+		goto fail;
+	if (nvlist_lookup_string(detector, FM_FMRI_SCHEME, &scheme) != 0)
+		goto fail;
+
+	if (STRCMP(scheme, FM_FMRI_SCHEME_DEV)) {
+		if (nvlist_lookup_string(detector, FM_FMRI_DEV_PATH,
+		    &path) != 0)
+			goto fail;
+		(void) strncpy(rcpath, path, FM_MAX_CLASS);
+	} else if (STRCMP(scheme, FM_FMRI_SCHEME_HC)) {
+		/*
+		 * This should only occur for ereports that come from the RC
+		 * itself.  In this case convert HC scheme to dev path.
+		 */
+		if (fab_hc2dev_nvl(hdl, detector, &path)) {
+			(void) strncpy(rcpath, path, FM_MAX_CLASS);
+			fmd_hdl_strfree(hdl, path);
+		} else {
+			goto fail;
+		}
+	} else {
+		return (B_FALSE);
+	}
+
+	/*
+	 * Extract the RC path by taking the first device in the dev path
+	 *
+	 * /pci@0,0/pci8086,3605@2/pci8086,3500@0/pci8086,3514@1/pci8086,105e@0
+	 * - to -
+	 * /pci@0,0
+	 */
+	path = strchr(rcpath + 1, '/');
+	if (path)
+		path[0] = '\0';
+
+	return (B_TRUE);
+fail:
+	return (B_FALSE);
+}
+
+char *
+fab_find_bdf(fmd_hdl_t *hdl, nvlist_t *nvl, pcie_req_id_t bdf)
+{
+	char	*retval;
+	char	query[500];
+	int	bus = 0, dev = 0, fn = 0;
+	char	rcpath[255];
+
+	if (bdf != (uint16_t)-1) {
+		bus = (bdf & PCIE_REQ_ID_BUS_MASK) >> PCIE_REQ_ID_BUS_SHIFT;
+		dev = (bdf & PCIE_REQ_ID_DEV_MASK) >> PCIE_REQ_ID_DEV_SHIFT;
+		fn = (bdf & PCIE_REQ_ID_FUNC_MASK) >> PCIE_REQ_ID_FUNC_SHIFT;
+	}
+
+	if (!fab_get_rcpath(hdl, nvl, rcpath))
+		goto fail;
+
+	/*
+	 * Explanation of the XSL XPATH Query
+	 * Line 1: Look at all nodes with the node name "propval"
+	 * Line 2-3: See if the "value" of the node ends with correct PCIEx BDF
+	 * Line 4-5: See if the "value" of the node ends with correct PCI BDF
+	 * Line 6: Go up one level to the parent of the current node
+	 * Line 7: See if child node contains "ASRU" with the same PCIe Root
+	 * Line 8: Traverse up the parent and the other siblings and look for
+	 *	   the io "propgroup" and get the value of the dev "propval"
+	 */
+	(void) snprintf(query, sizeof (query), "//propval["
+	    "contains(substring(@value, string-length(@value) - 34), "
+	    "'pciexbus=%d/pciexdev=%d/pciexfn=%d') or "
+	    "contains(substring(@value, string-length(@value) - 28), "
+	    "'pcibus=%d/pcidev=%d/pcifn=%d')"
+	    "]/parent::"
+	    "*/propval[@name='ASRU' and contains(@value, '%s')]"
+	    "/parent::*/following-sibling::*[@name='io']/propval[@name='dev']/"
+	    "@value", bus, dev, fn, bus, dev, fn, rcpath);
+
+	retval = fab_xpath_query(hdl, query);
+	if (retval) {
+		fmd_hdl_debug(hdl, "BDF Dev Path: %s\n", retval);
+		return (retval);
+	}
+fail:
+	return (NULL);
+}
+
+char *
+fab_find_addr(fmd_hdl_t *hdl, nvlist_t *nvl, uint64_t addr)
+{
+	xmlXPathObjectPtr xpathObj = NULL;
+	xmlNodeSetPtr nodes;
+	xmlNodePtr devNode;
+	char *retval, *temp;
+	char query[500];
+	int size, i, j;
+	uint32_t prop[50];
+	char *token;
+	pci_regspec_t *assign_p;
+	uint64_t low, hi;
+	char rcpath[255];
+
+	if (!fab_get_rcpath(hdl, nvl, rcpath))
+		goto fail;
+
+	(void) snprintf(query, sizeof (query), "//propval["
+	    "@name='ASRU' and contains(@value, '%s')]/"
+	    "parent::*/following-sibling::*[@name='pci']/"
+	    "propval[@name='assigned-addresses']", rcpath);
+
+	fmd_hdl_debug(hdl, "xpathObj query %s\n", query);
+
+	xpathObj = xmlXPathEvalExpression((const xmlChar *)query, fab_xpathCtx);
+
+	if (xpathObj == NULL)
+		goto fail;
+
+	fmd_hdl_debug(hdl, "xpathObj 0x%p type %d\n", xpathObj, xpathObj->type);
+
+	nodes = xpathObj->nodesetval;
+	size = (nodes) ? nodes->nodeNr : 0;
+
+	/* Decode the list of assigned addresses xml nodes for each device */
+	for (i = 0; i < size; i++) {
+		char *tprop;
+
+		devNode = nodes->nodeTab[i];
+		if (!HAS_PROP(devNode, "value"))
+			continue;
+
+		/* Convert "string" assigned-addresses to pci_regspec_t */
+		j = 0;
+		tprop = GET_PROP(devNode, "value");
+		for (token = strtok(tprop, " "); token != NULL && j < 49;
+		    token = strtok(NULL, " ")) {
+			prop[j++] = strtoul(token, (char **)NULL, 16);
+		}
+		prop[j] = (uint32_t)-1;
+		FREE_PROP(tprop);
+
+		/* Check if address belongs to this device */
+		for (assign_p = (pci_regspec_t *)prop;
+		    assign_p->pci_phys_hi != (uint_t)-1; assign_p++) {
+			low = assign_p->pci_phys_low;
+			hi = low + assign_p->pci_size_low;
+			if ((addr < hi) && (addr >= low)) {
+				fmd_hdl_debug(hdl, "Found Address\n");
+				goto found;
+			}
+		}
+	}
+	goto fail;
+
+found:
+	/* Traverse up the xml tree and back down to find the right propgroup */
+	for (devNode = devNode->parent->parent->children;
+	    devNode; devNode = devNode->next) {
+		char	*tprop;
+
+		tprop = GET_PROP(devNode, "name");
+		if (STRCMP(devNode->name, "propgroup") &&
+		    STRCMP(tprop, "io")) {
+			FREE_PROP(tprop);
+			goto propgroup;
+		}
+		FREE_PROP(tprop);
+	}
+	goto fail;
+
+propgroup:
+	/* Retrive the "dev" propval and return */
+	for (devNode = devNode->children; devNode; devNode = devNode->next) {
+		char	*tprop;
+
+		tprop = GET_PROP(devNode, "name");
+		if (STRCMP(devNode->name, "propval") &&
+		    STRCMP(tprop, "dev")) {
+			FREE_PROP(tprop);
+			temp = GET_PROP(devNode, "value");
+			retval = fmd_hdl_strdup(hdl, temp, FMD_SLEEP);
+			fmd_hdl_debug(hdl, "Addr Dev Path: %s\n", retval);
+			xmlFree(temp);
+			xmlXPathFreeObject(xpathObj);
+			return (retval);
+		}
+		FREE_PROP(tprop);
+	}
+fail:
+	if (xpathObj != NULL)
+		xmlXPathFreeObject(xpathObj);
+	return (NULL);
+}
+
+void
+fab_pr(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl)
+{
+	nvpair_t *nvp;
+
+	for (nvp = nvlist_next_nvpair(nvl, NULL);
+	    nvp != NULL;
+	    nvp = nvlist_next_nvpair(nvl, nvp)) {
+
+		data_type_t type = nvpair_type(nvp);
+		const char *name = nvpair_name(nvp);
+
+		boolean_t b;
+		uint8_t i8;
+		uint16_t i16;
+		uint32_t i32;
+		uint64_t i64;
+		char *str;
+		nvlist_t *cnv;
+
+		nvlist_t **nvlarr;
+		uint_t arrsize;
+		int arri;
+
+
+		if (STRCMP(name, FM_CLASS))
+			continue; /* already printed by caller */
+
+		fmd_hdl_debug(hdl, " %s=", name);
+
+		switch (type) {
+		case DATA_TYPE_BOOLEAN:
+			fmd_hdl_debug(hdl, "DATA_TYPE_BOOLEAN 1");
+			break;
+
+		case DATA_TYPE_BOOLEAN_VALUE:
+			(void) nvpair_value_boolean_value(nvp, &b);
+			fmd_hdl_debug(hdl, "DATA_TYPE_BOOLEAN_VALUE %s",
+			    b ? "1" : "0");
+			break;
+
+		case DATA_TYPE_BYTE:
+			(void) nvpair_value_byte(nvp, &i8);
+			fmd_hdl_debug(hdl, "DATA_TYPE_BYTE 0x%x", i8);
+			break;
+
+		case DATA_TYPE_INT8:
+			(void) nvpair_value_int8(nvp, (void *)&i8);
+			fmd_hdl_debug(hdl, "DATA_TYPE_INT8 0x%x", i8);
+			break;
+
+		case DATA_TYPE_UINT8:
+			(void) nvpair_value_uint8(nvp, &i8);
+			fmd_hdl_debug(hdl, "DATA_TYPE_UINT8 0x%x", i8);
+			break;
+
+		case DATA_TYPE_INT16:
+			(void) nvpair_value_int16(nvp, (void *)&i16);
+			fmd_hdl_debug(hdl, "DATA_TYPE_INT16 0x%x", i16);
+			break;
+
+		case DATA_TYPE_UINT16:
+			(void) nvpair_value_uint16(nvp, &i16);
+			fmd_hdl_debug(hdl, "DATA_TYPE_UINT16 0x%x", i16);
+			break;
+
+		case DATA_TYPE_INT32:
+			(void) nvpair_value_int32(nvp, (void *)&i32);
+			fmd_hdl_debug(hdl, "DATA_TYPE_INT32 0x%x", i32);
+			break;
+
+		case DATA_TYPE_UINT32:
+			(void) nvpair_value_uint32(nvp, &i32);
+			fmd_hdl_debug(hdl, "DATA_TYPE_UINT32 0x%x", i32);
+			break;
+
+		case DATA_TYPE_INT64:
+			(void) nvpair_value_int64(nvp, (void *)&i64);
+			fmd_hdl_debug(hdl, "DATA_TYPE_INT64 0x%llx",
+			    (u_longlong_t)i64);
+			break;
+
+		case DATA_TYPE_UINT64:
+			(void) nvpair_value_uint64(nvp, &i64);
+			fmd_hdl_debug(hdl, "DATA_TYPE_UINT64 0x%llx",
+			    (u_longlong_t)i64);
+			break;
+
+		case DATA_TYPE_HRTIME:
+			(void) nvpair_value_hrtime(nvp, (void *)&i64);
+			fmd_hdl_debug(hdl, "DATA_TYPE_HRTIME 0x%llx",
+			    (u_longlong_t)i64);
+			break;
+
+		case DATA_TYPE_STRING:
+			(void) nvpair_value_string(nvp, &str);
+			fmd_hdl_debug(hdl, "DATA_TYPE_STRING \"%s\"",
+			    str ? str : "<NULL>");
+			break;
+
+		case DATA_TYPE_NVLIST:
+			fmd_hdl_debug(hdl, "[");
+			(void) nvpair_value_nvlist(nvp, &cnv);
+			fab_pr(hdl, NULL, cnv);
+			fmd_hdl_debug(hdl, " ]");
+			break;
+
+		case DATA_TYPE_BOOLEAN_ARRAY:
+		case DATA_TYPE_BYTE_ARRAY:
+		case DATA_TYPE_INT8_ARRAY:
+		case DATA_TYPE_UINT8_ARRAY:
+		case DATA_TYPE_INT16_ARRAY:
+		case DATA_TYPE_UINT16_ARRAY:
+		case DATA_TYPE_INT32_ARRAY:
+		case DATA_TYPE_UINT32_ARRAY:
+		case DATA_TYPE_INT64_ARRAY:
+		case DATA_TYPE_UINT64_ARRAY:
+		case DATA_TYPE_STRING_ARRAY:
+			fmd_hdl_debug(hdl, "[...]");
+			break;
+		case DATA_TYPE_NVLIST_ARRAY:
+			arrsize = 0;
+			(void) nvpair_value_nvlist_array(nvp, &nvlarr,
+			    &arrsize);
+
+			for (arri = 0; arri < arrsize; arri++) {
+				fab_pr(hdl, ep, nvlarr[arri]);
+			}
+
+			break;
+		case DATA_TYPE_UNKNOWN:
+			fmd_hdl_debug(hdl, "<unknown>");
+			break;
+		}
+	}
+}
--- a/usr/src/cmd/mdb/common/modules/crypto/impl.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/cmd/mdb/common/modules/crypto/impl.c	Fri Dec 11 10:41:17 2009 -0800
@@ -175,11 +175,13 @@
 	mdb_printf("pd_provider_list:\t%p\n", desc.pd_provider_list);
 
 	mdb_printf("pd_resume_cv:\t\t%hd\n", desc.pd_resume_cv._opaque);
-	mdb_printf("pd_flags:\t\t%s %s %s %s %s\n",
+	mdb_printf("pd_flags:\t\t%s %s %s %s %s %s\n",
 	    (desc.pd_flags & CRYPTO_HIDE_PROVIDER) ?
 	    "CRYPTO_HIDE_PROVIDER" : " ",
 	    (desc.pd_flags & CRYPTO_HASH_NO_UPDATE) ?
 	    "CRYPTO_HASH_NO_UPDATE" : " ",
+	    (desc.pd_flags & CRYPTO_HMAC_NO_UPDATE) ?
+	    "CRYPTO_HMAC_NO_UPDATE" : " ",
 	    (desc.pd_flags & CRYPTO_SYNCHRONOUS) ?
 	    "CRYPTO_SYNCHRONOUS" : " ",
 	    (desc.pd_flags & KCF_LPROV_MEMBER) ?
@@ -188,6 +190,8 @@
 	    "KCF_PROV_RESTRICTED" : " ");
 	if (desc.pd_flags & CRYPTO_HASH_NO_UPDATE)
 		mdb_printf("pd_hash_limit:\t\t%u\n", desc.pd_hash_limit);
+	if (desc.pd_flags & CRYPTO_HMAC_NO_UPDATE)
+		mdb_printf("pd_hmac_limit:\t\t%u\n", desc.pd_hmac_limit);
 
 	mdb_printf("pd_kstat:\t\t%p\n", desc.pd_kstat);
 
--- a/usr/src/lib/brand/solaris10/s10_brand/common/s10_brand.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/brand/solaris10/s10_brand/common/s10_brand.c	Fri Dec 11 10:41:17 2009 -0800
@@ -617,7 +617,7 @@
 	boolean_t fl_init_pin;
 	boolean_t fl_set_pin;
 
-	boolean_t prov_is_limited;
+	boolean_t prov_is_hash_limited;
 	uint32_t prov_hash_threshold;
 	uint32_t prov_hash_limit;
 } s10_crypto_function_list_t;
@@ -742,7 +742,7 @@
 	struct_assign(s10_param, native_param, fl_list.fl_init_pin);
 	struct_assign(s10_param, native_param, fl_list.fl_set_pin);
 
-	struct_assign(s10_param, native_param, fl_list.prov_is_limited);
+	struct_assign(s10_param, native_param, fl_list.prov_is_hash_limited);
 	struct_assign(s10_param, native_param, fl_list.prov_hash_threshold);
 	struct_assign(s10_param, native_param, fl_list.prov_hash_limit);
 
--- a/usr/src/lib/fm/topo/libtopo/common/hc.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/fm/topo/libtopo/common/hc.c	Fri Dec 11 10:41:17 2009 -0800
@@ -182,6 +182,7 @@
 	{ RISER, TOPO_STABILITY_PRIVATE },
 	{ SHELF, TOPO_STABILITY_PRIVATE },
 	{ SES_ENCLOSURE, TOPO_STABILITY_PRIVATE },
+	{ SP, TOPO_STABILITY_PRIVATE },
 	{ STRAND, TOPO_STABILITY_PRIVATE },
 	{ SUBCHASSIS, TOPO_STABILITY_PRIVATE },
 	{ SYSTEMBOARD, TOPO_STABILITY_PRIVATE },
--- a/usr/src/lib/fm/topo/libtopo/common/topo_hc.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/fm/topo/libtopo/common/topo_hc.h	Fri Dec 11 10:41:17 2009 -0800
@@ -79,6 +79,7 @@
 #define	RISER		"riser"
 #define	SHELF		"shelf"
 #define	SES_ENCLOSURE	"ses-enclosure"
+#define	SP		"sp"
 #define	SUBCHASSIS	"subchassis"
 #define	SYSTEMBOARD	"systemboard"
 #define	XAUI		"xaui"
--- a/usr/src/lib/fm/topo/modules/sun4v/xaui/Makefile	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/fm/topo/modules/sun4v/xaui/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -20,11 +20,9 @@
 #
 
 #
-# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-# ident	"%Z%%M%	%I%	%E% SMI"
-#
 
 MODULE = xaui
 ARCH = sun4v
@@ -34,3 +32,7 @@
 MODULESRCS = $(XAUISRCS)
 
 include ../../Makefile.plugin
+
+LDLIBS += -ldevinfo -lmdesc -lldom
+
+CPPFLAGS += -I. -I$(ROOT)/usr/platform/sun4v/include
--- a/usr/src/lib/fm/topo/modules/sun4v/xaui/xaui.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/fm/topo/modules/sun4v/xaui/xaui.c	Fri Dec 11 10:41:17 2009 -0800
@@ -24,10 +24,15 @@
  * Use is subject to license terms.
  */
 
-#include <string.h>
+
+#include <strings.h>
 #include <fm/topo_mod.h>
 #include <fm/topo_hc.h>
 #include <sys/fm/protocol.h>
+#include <sys/fm/ldom.h>
+#include <sys/mdesc.h>
+#include <assert.h>
+#include <sys/systeminfo.h>
 #include "xaui.h"
 
 /*
@@ -39,8 +44,9 @@
 extern "C" {
 #endif
 
-#define	XAUI_VERSION	TOPO_VERSION
-#define	XFP_MAX	1	/* max number of xfp per xaui card */
+#define	XAUI_VERSION		TOPO_VERSION
+#define	XFP_MAX			1	/* max number of xfp per xaui card */
+#define	MAX_PCIADDR_DEPTH	3	/* Bus/Dev/Func */
 
 static int xaui_enum(topo_mod_t *, tnode_t *, const char *, topo_instance_t,
 		    topo_instance_t, void *, void *);
@@ -58,6 +64,10 @@
 	1
 };
 
+static topo_mod_t *xaui_mod_hdl = NULL;
+static int freeprilabel = 0;
+static int ispci = 0;
+
 /*ARGSUSED*/
 void
 _topo_init(topo_mod_t *mod, topo_version_t version)
@@ -148,55 +158,324 @@
 	return (0);
 }
 
+
+static void *
+xaui_topo_alloc(size_t size)
+{
+	assert(xaui_mod_hdl != NULL);
+	return (topo_mod_alloc(xaui_mod_hdl, size));
+}
+
+
+static void
+xaui_topo_free(void *data, size_t size)
+{
+	assert(xaui_mod_hdl != NULL);
+	topo_mod_free(xaui_mod_hdl, data, size);
+}
+
+
+static char *
+xaui_get_path(topo_mod_t *mod, void *priv, topo_instance_t inst)
+{
+	int i = 0;
+	int rv;
+	di_node_t dnode;
+	char *devfs_path;
+	char *path;
+	char *buf = NULL;
+	char *freebuf;
+	char *addr[MAX_PCIADDR_DEPTH] = { NULL };
+	char *token;
+	char *lastp;
+	size_t buf_len;
+	size_t path_len = 0;
+
+	/*
+	 * There are two ways to get here:
+	 * 1. niu enum  - private data is the di_node_t for this xaui
+	 *		- instance is the ethernet function number
+	 *    device path looks like: /niu@80/network@0:nxge@0
+	 *    PRI path looks like:    /@80/@0
+	 *
+	 * 2. pcibus enum - private data is the parent tnode_t
+	 *		  - instance is the pci function number
+	 *    device path looks like: /pci@500/pci@0/pci@8/network@0:nxge0
+	 *    PRI path looks like:    /@500/@0/@8/@0
+	 *
+	 *    PRI path for pciex is /@Bus/@Dev/@Func/@Instance
+	 *
+	 *    The parent topo_node for xaui is pciexfn; check to see if the
+	 *    private data is a topo_node by looking for the "pciexfn" name.
+	 */
+	if (ispci == 1) {
+		/* coming from pcibus */
+		topo_mod_dprintf(mod, "from pcibus\n");
+		/* PCI Func tnode */
+		dnode = topo_node_getspecific((tnode_t *)priv);
+	} else {
+		/* coming from niu */
+		topo_mod_dprintf(mod, "from niu\n");
+		dnode = (struct di_node *)priv;
+	}
+	if (dnode == DI_NODE_NIL) {
+		topo_mod_dprintf(mod, "DI_NODE_NIL\n");
+		return (NULL);
+	}
+
+	/* get device path */
+	devfs_path = di_devfs_path(dnode);
+	if (devfs_path == NULL) {
+		topo_mod_dprintf(mod, "NULL devfs_path\n");
+		return (NULL);
+	}
+
+	/* alloc enough space to hold the path */
+	topo_mod_dprintf(mod, "devfs_path (%s)\n", devfs_path);
+	buf_len = strlen(devfs_path) + 1;
+	buf = (char *)xaui_topo_alloc(buf_len);
+	if (buf == NULL) {
+		return (NULL);
+	}
+	freebuf = buf; /* strtok_r is destructive */
+	(void) strcpy(buf, devfs_path);
+
+	if (ispci == 1) {
+		/*
+		 * devfs path for pciexfn looks like
+		 * /pci@BUS/pci@DEV/pci@FUNC
+		 *
+		 * Strip "/pci@" chars from path and add /@Instance
+		 */
+		topo_mod_dprintf(mod, "ispci\n");
+		if ((token = strtok_r(buf, "/pci@", &lastp)) != NULL) {
+			addr[i] = topo_mod_strdup(mod, token);
+			path_len = strlen(token);
+			while ((token = strtok_r(NULL, "/pci@", &lastp)) !=
+			    NULL) {
+				if (++i < MAX_PCIADDR_DEPTH) {
+					addr[i] = topo_mod_strdup(mod, token);
+					path_len += strlen(token);
+				} else {
+					xaui_topo_free(freebuf, buf_len);
+					return (NULL);
+				}
+			}
+		} else {
+			xaui_topo_free(freebuf, buf_len);
+			return (NULL);
+		}
+		xaui_topo_free(freebuf, buf_len);
+
+		/* path: addresses + '/@' + '/@instance' (0/1)  + '\0' */
+		path_len += ((MAX_PCIADDR_DEPTH * 2) + 3 + 1);
+		path = (char *)xaui_topo_alloc(path_len);
+		rv = snprintf(path, path_len, "/@%s/@%s/@%s/@%d",
+		    addr[0], addr[1], addr[2], inst);
+		if (rv < 0) {
+			return (NULL);
+		}
+	} else {
+		/* need to strip "/niu@" chars from path and add /@Instance */
+		token = strtok_r(buf, "/niu@", &lastp);
+		addr[0] = topo_mod_strdup(mod, token);
+		path_len = strlen(token);
+		xaui_topo_free(freebuf, buf_len);
+
+		/* path: address + '/@' + '/@instance' (0/1) + '\0' */
+		path_len += (2 + 3 +1);
+		path = (char *)xaui_topo_alloc(path_len);
+		rv = snprintf(path, path_len, "/@%s/@%d", addr[0], inst);
+		if (rv < 0) {
+			return (NULL);
+		}
+	}
+	topo_mod_dprintf(mod, "xaui_get_path: path (%s)\n", path);
+
+	/* cleanup */
+	for (i = 0; i < MAX_PCIADDR_DEPTH; i++) {
+		if (addr[i] != NULL) {
+			xaui_topo_free(addr[i], strlen(addr[i]) + 1);
+		}
+	}
+
+	/* should return something like /@500/@0/@8/@0 or /@80/@0 */
+	return (path);
+}
+
+
 static int
-xaui_label_set(topo_mod_t *mod, tnode_t *node, topo_instance_t n)
+xaui_get_pri_label(topo_mod_t *mod, topo_instance_t n, void *priv,
+    char **labelp)
+{
+	ldom_hdl_t *hdlp;
+	uint32_t type = 0;
+	ssize_t bufsize = 0;
+	uint64_t *bufp;
+	md_t *mdp;
+	int num_nodes, ncomp;
+	mde_cookie_t *listp;
+	char *pstr = NULL;
+	int i;
+	char *path;
+
+	/* Get device path minus the device names */
+	path = xaui_get_path(mod, priv, n);
+	if (path == NULL) {
+		topo_mod_dprintf(mod, "can't get path\n");
+		return (-1);
+	}
+
+	hdlp = ldom_init(xaui_topo_alloc, xaui_topo_free);
+	if (hdlp == NULL) {
+		topo_mod_dprintf(mod, "ldom_init failed\n");
+		return (-1);
+	}
+
+	(void) ldom_get_type(hdlp, &type);
+	if ((type & LDOM_TYPE_CONTROL) != 0) {
+		bufsize = ldom_get_core_md(hdlp, &bufp);
+	} else {
+		bufsize = ldom_get_local_md(hdlp, &bufp);
+	}
+	if (bufsize < 1) {
+		topo_mod_dprintf(mod, "failed to get pri/md (%d)\n", bufsize);
+		ldom_fini(hdlp);
+		return (-1);
+	}
+
+	if ((mdp = md_init_intern(bufp, xaui_topo_alloc, xaui_topo_free)) ==
+	    NULL || (num_nodes = md_node_count(mdp)) < 1) {
+		topo_mod_dprintf(mod, "md_init_intern failed\n");
+		xaui_topo_free(bufp, (size_t)bufsize);
+		ldom_fini(hdlp);
+		return (-1);
+	}
+
+	if ((listp = (mde_cookie_t *)xaui_topo_alloc(
+	    sizeof (mde_cookie_t) * num_nodes)) == NULL) {
+		topo_mod_dprintf(mod, "can't alloc listp\n");
+		xaui_topo_free(bufp, (size_t)bufsize);
+		(void) md_fini(mdp);
+		ldom_fini(hdlp);
+		return (-1);
+	}
+
+	ncomp = md_scan_dag(mdp, MDE_INVAL_ELEM_COOKIE,
+	    md_find_name(mdp, "component"),
+	    md_find_name(mdp, "fwd"), listp);
+	if (ncomp <= 0) {
+		topo_mod_dprintf(mod, "no component nodes found\n");
+		xaui_topo_free(listp, sizeof (mde_cookie_t) * num_nodes);
+		xaui_topo_free(bufp, (size_t)bufsize);
+		(void) md_fini(mdp);
+		ldom_fini(hdlp);
+		return (-1);
+	}
+	topo_mod_dprintf(mod, "number of comps (%d)\n", ncomp);
+
+	for (i = 0; i < ncomp; i++) {
+		/*
+		 * Look for type == "io", topo-hc-name == "xaui";
+		 * match "path" md property.
+		 */
+		if ((md_get_prop_str(mdp, listp[i], "type", &pstr) == 0) &&
+		    (pstr != NULL) &&
+		    (strncmp(pstr, "io", strlen(pstr)) == 0) &&
+		    (md_get_prop_str(mdp, listp[i], "topo-hc-name", &pstr)
+		    == 0) && (pstr != NULL) &&
+		    (strncmp(pstr, "xaui", strlen(pstr)) == 0) &&
+		    (md_get_prop_str(mdp, listp[i], "path", &pstr) == 0) &&
+		    (pstr != NULL)) {
+			/* check node path */
+			if (strcmp(pstr, path) == 0) {
+				/* this is the node, grab the label */
+				if (md_get_prop_str(mdp, listp[i], "nac",
+				    &pstr) == 0) {
+					*labelp = topo_mod_strdup(mod, pstr);
+					/* need to free this later */
+					freeprilabel = 1;
+					break;
+				}
+			}
+		}
+	}
+
+	xaui_topo_free(listp, sizeof (mde_cookie_t) * num_nodes);
+	xaui_topo_free(bufp, (size_t)bufsize);
+	(void) md_fini(mdp);
+	ldom_fini(hdlp);
+
+	if (path != NULL) {
+		xaui_topo_free(path, strlen(path) + 1);
+	}
+	return (0);
+}
+
+
+static int
+xaui_label_set(topo_mod_t *mod, tnode_t *node, topo_instance_t n, void *priv)
 {
 	const char *label = NULL;
 	char *plat, *pp;
 	int err;
 	int i, p;
 
-	if (Phyxaui_Names == NULL)
-		return (-1);
-
-	if (topo_prop_get_string(node,
-	    FM_FMRI_AUTHORITY, FM_FMRI_AUTH_PRODUCT, &plat, &err) < 0) {
-		return (topo_mod_seterrno(mod, err));
-	}
-	/*
-	 * Trim SUNW, from the platform name
-	 */
-	pp = strchr(plat, ',');
-	if (pp == NULL)
-		pp = plat;
-	else
-		++pp;
+	(void) xaui_get_pri_label(mod, n, priv, (char **)&label);
+	if (label == NULL) {
+		topo_mod_dprintf(mod, "no PRI node for label\n");
+		if (Phyxaui_Names == NULL)
+			return (-1);
 
-	for (p = 0; p < Phyxaui_Names->psn_nplats; p++) {
-		if (strcmp(Phyxaui_Names->psn_names[p].pnm_platform,
-		    pp) != 0)
-			continue;
-		for (i = 0; i < Phyxaui_Names->psn_names[p].pnm_nnames; i++) {
-			physnm_t ps;
-			ps = Phyxaui_Names->psn_names[p].pnm_names[i];
-			if (ps.ps_num == n) {
-				label = ps.ps_label;
-				break;
+		if (topo_prop_get_string(node,
+		    FM_FMRI_AUTHORITY, FM_FMRI_AUTH_PRODUCT, &plat, &err) < 0) {
+			return (topo_mod_seterrno(mod, err));
+		}
+		/*
+		 * Trim SUNW, from the platform name
+		 */
+		pp = strchr(plat, ',');
+		if (pp == NULL)
+			pp = plat;
+		else
+			++pp;
+
+		for (p = 0; p < Phyxaui_Names->psn_nplats; p++) {
+			if (strcmp(Phyxaui_Names->psn_names[p].pnm_platform,
+			    pp) != 0)
+				continue;
+			for (i = 0; i < Phyxaui_Names->psn_names[p].pnm_nnames;
+			    i++) {
+				physnm_t ps;
+				ps = Phyxaui_Names->psn_names[p].pnm_names[i];
+				if (ps.ps_num == n) {
+					label = ps.ps_label;
+					break;
+				}
 			}
+			break;
 		}
-		break;
+		topo_mod_strfree(mod, plat);
 	}
-	topo_mod_strfree(mod, plat);
 
 	if (label != NULL) {
 		if (topo_prop_set_string(node, TOPO_PGROUP_PROTOCOL,
 		    TOPO_PROP_LABEL, TOPO_PROP_IMMUTABLE,
 		    label, &err) != 0) {
+			if (freeprilabel == 1) {
+				topo_mod_strfree(mod, (char *)label);
+			}
 			return (topo_mod_seterrno(mod, err));
 		}
+		if (freeprilabel == 1) {
+			topo_mod_strfree(mod, (char *)label);
+		}
 	}
+
 	return (0);
 }
+
+
 /*ARGSUSED*/
 static tnode_t *
 xaui_declare(tnode_t *parent, const char *name, topo_instance_t i,
@@ -213,7 +492,15 @@
 
 	(void) xaui_fru_set(mod, ntn);
 
-	(void) xaui_label_set(mod, ntn, i);
+	/* when coming from pcibus: private data == parent tnode */
+	if (priv == (void *)parent) {
+		ispci = 1;
+	}
+
+	(void) xaui_label_set(mod, ntn, i, priv);
+
+	/* reset pcibus/niu switch */
+	ispci = 0;
 
 	/* set ASRU to resource fmri */
 	if (topo_prop_get_fmri(ntn, TOPO_PGROUP_PROTOCOL,
@@ -232,6 +519,7 @@
 	return (ntn);
 }
 
+
 static topo_mod_t *
 xfp_enum_load(topo_mod_t *mp)
 {
@@ -243,10 +531,12 @@
 	}
 	return (rp);
 }
+
+
 /*ARGSUSED*/
 static int
 xaui_enum(topo_mod_t *mod, tnode_t *rnode, const char *name,
-	topo_instance_t min, topo_instance_t max, void *arg, void *notused)
+	topo_instance_t min, topo_instance_t max, void *arg, void *priv)
 {
 	tnode_t *xauin;
 
@@ -256,13 +546,16 @@
 		    XAUI);
 		return (0);
 	}
+
+	xaui_mod_hdl = mod;
+
 	/*
 	 * Load XFP enum
 	 */
 	if (xfp_enum_load(mod) == NULL)
 		return (-1);
 
-	if ((xauin = xaui_declare(rnode, name, min, arg, mod)) == NULL)
+	if ((xauin = xaui_declare(rnode, name, min, priv, mod)) == NULL)
 		return (-1);
 
 	/* set the private data to be the instance number of niufn */
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelDigest.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelDigest.c	Fri Dec 11 10:41:17 2009 -0800
@@ -209,7 +209,7 @@
 		pMechanism = &(opp->mech);
 
 		if ((ulDataLen < SLOT_THRESHOLD(session_p)) ||
-		    (ulDataLen > SLOT_MAX_INDATA_LEN(session_p))) {
+		    (ulDataLen > SLOT_HASH_MAX_INDATA_LEN(session_p))) {
 			session_p->digest.flags |= CRYPTO_EMULATE_USING_SW;
 			(void) pthread_mutex_unlock(&session_p->session_mutex);
 			ses_lock_held = B_FALSE;
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelEmulate.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelEmulate.c	Fri Dec 11 10:41:17 2009 -0800
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -152,14 +152,23 @@
     CK_ULONG ulPartLen, int opflag)
 {
 	CK_RV rv;
+	int maxlen;
 	digest_buf_t *bufp;
 	boolean_t use_soft = B_FALSE;
 	crypto_active_op_t *opp;
 
-	opp = (opflag & OP_DIGEST) ? &(session_p->digest) : \
-	    ((opflag & OP_SIGN) ? &(session_p->sign) : &(session_p->verify));
-
-	if (!SLOT_HAS_LIMITED_HASH(session_p))
+	if (opflag & OP_DIGEST) {
+		opp = &(session_p->digest);
+		if (!SLOT_HAS_LIMITED_HASH(session_p))
+			return (CKR_ARGUMENTS_BAD);
+		maxlen =  SLOT_HASH_MAX_INDATA_LEN(session_p);
+	} else if (opflag & (OP_SIGN | OP_VERIFY)) {
+		opp = (opflag & OP_SIGN) ?
+		    &(session_p->sign) : &(session_p->verify);
+		if (!SLOT_HAS_LIMITED_HMAC(session_p))
+			return (CKR_ARGUMENTS_BAD);
+		maxlen =  SLOT_HMAC_MAX_INDATA_LEN(session_p);
+	} else
 		return (CKR_ARGUMENTS_BAD);
 
 	if (opp->flags & CRYPTO_EMULATE_USING_SW) {
@@ -175,7 +184,7 @@
 	}
 
 	/* Did we exceed the maximum allowed? */
-	if (bufp->indata_len + ulPartLen > SLOT_MAX_INDATA_LEN(session_p)) {
+	if (bufp->indata_len + ulPartLen > maxlen) {
 		use_soft = B_TRUE;
 	} else if (ulPartLen > (bufp->buf_len - bufp->indata_len))  {
 		int siz = ulPartLen < bufp->buf_len ?
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelEmulate.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelEmulate.h	Fri Dec 11 10:41:17 2009 -0800
@@ -20,15 +20,13 @@
  */
 
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef	_KERNEL_EMULATE_H
 #define	_KERNEL_EMULATE_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -41,9 +39,14 @@
 #include "kernelSoftCommon.h"
 
 #define	SLOT_THRESHOLD(sp) (slot_table[sp->ses_slotid]->sl_threshold)
-#define	SLOT_MAX_INDATA_LEN(sp)	(slot_table[sp->ses_slotid]->sl_max_inlen)
+#define	SLOT_HASH_MAX_INDATA_LEN(sp) \
+	(slot_table[sp->ses_slotid]->sl_hash_max_inlen)
+#define	SLOT_HMAC_MAX_INDATA_LEN(sp) \
+	(slot_table[sp->ses_slotid]->sl_hmac_max_inlen)
 #define	SLOT_HAS_LIMITED_HASH(sp) (slot_table[sp->ses_slotid]->sl_flags & \
 	CRYPTO_LIMITED_HASH_SUPPORT)
+#define	SLOT_HAS_LIMITED_HMAC(sp) (slot_table[sp->ses_slotid]->sl_flags & \
+	CRYPTO_LIMITED_HMAC_SUPPORT)
 #define	get_sp(opp)	(((digest_buf_t *)((opp)->context))->soft_sp)
 #define	get_spp(opp)	(&(((digest_buf_t *)((opp)->context))->soft_sp))
 
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSession.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSession.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <pthread.h>
 #include <errno.h>
 #include <security/cryptoki.h>
@@ -244,8 +242,8 @@
 
 	/*
 	 * XXX Need to support this case in future.
-	 * This is the case where we exceeded SLOT_MAX_INDATA_LEN and
-	 * hence started using libmd. SLOT_MAX_INDATA_LEN is at least
+	 * This is the case where we exceeded SLOT_HASH_MAX_INDATA_LEN and
+	 * hence started using libmd. SLOT_HASH_MAX_INDATA_LEN is at least
 	 * 64K for current crypto framework providers and web servers
 	 * do not need to clone digests that big for SSL operations.
 	 */
@@ -369,7 +367,7 @@
 	/* compute the data buffer length */
 	indata_len = expected_len - sizeof (int) -
 	    sizeof (CK_STATE) - sizeof (crypto_active_op_t);
-	if (indata_len > SLOT_MAX_INDATA_LEN(session_p))
+	if (indata_len > SLOT_HASH_MAX_INDATA_LEN(session_p))
 		return (CKR_SAVED_STATE_INVALID);
 	src += sizeof (int);
 
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSign.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSign.c	Fri Dec 11 10:41:17 2009 -0800
@@ -137,7 +137,7 @@
 		rv = crypto2pkcs11_error_number(sign_init.si_return_value);
 	}
 
-	if (rv == CKR_OK && SLOT_HAS_LIMITED_HASH(session_p) &&
+	if (rv == CKR_OK && SLOT_HAS_LIMITED_HMAC(session_p) &&
 	    is_hmac(pMechanism->mechanism)) {
 		if (key_p->is_lib_obj && key_p->class == CKO_SECRET_KEY) {
 			(void) pthread_mutex_lock(&session_p->session_mutex);
@@ -221,7 +221,7 @@
 
 	if (session_p->sign.flags & CRYPTO_EMULATE) {
 		if ((ulDataLen < SLOT_THRESHOLD(session_p)) ||
-		    (ulDataLen > SLOT_MAX_INDATA_LEN(session_p))) {
+		    (ulDataLen > SLOT_HMAC_MAX_INDATA_LEN(session_p))) {
 			session_p->sign.flags |= CRYPTO_EMULATE_USING_SW;
 			(void) pthread_mutex_unlock(&session_p->session_mutex);
 			ses_lock_held = B_FALSE;
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSlot.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSlot.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef	_KERNEL_SLOT_H
 #define	_KERNEL_SLOT_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -44,11 +42,16 @@
 
 /*
  * This slot has limited hash support. It can not do multi-part
- * hashing (updates) and it can not hash input data of size
- * greater than sl_max_inlen.
+ * hashing (updates).
  */
 #define	CRYPTO_LIMITED_HASH_SUPPORT	0x00000001
 
+/*
+ * This slot has limited hmac support. It can not do multi-part
+ * hmac (updates).
+ */
+#define	CRYPTO_LIMITED_HMAC_SUPPORT	0x00000002
+
 typedef struct kernel_slot {
 	CK_SLOT_ID		sl_provider_id;	/* kernel provider ID */
 	crypto_function_list_t	sl_func_list;	/* function list */
@@ -65,12 +68,18 @@
 	 * The maximum input data that can be digested by this slot.
 	 * Used only if CRYPTO_LIMITED_HASH_SUPPORT is set in sl_flags.
 	 */
-	int			sl_max_inlen;
+	int			sl_hash_max_inlen;
+
+	/*
+	 * The maximum input data that can be hmac'ed by this slot.
+	 * Used only if CRYPTO_LIMITED_HMAC_SUPPORT is set in sl_flags.
+	 */
+	int			sl_hmac_max_inlen;
 
 	/*
 	 * The threshold for input data size. We use this slot
 	 * only if data size is at or above this value. Used only if
-	 * CRYPTO_LIMITED_HASH_SUPPORT is set.
+	 * CRYPTO_LIMITED_HASH_SUPPORT or CRYPTO_LIMITED_HMAC_SUPPORT is set.
 	 */
 	int			sl_threshold;
 
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSlottable.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelSlottable.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <errno.h>
 #include <security/cryptoki.h>
 #include <sys/crypto/ioctl.h>
@@ -190,10 +188,18 @@
 	pslot->sl_func_list.fl_set_pin = fl.fl_list.fl_set_pin;
 
 	pslot->sl_flags = 0;
-	if (fl.fl_list.prov_is_limited) {
-		pslot->sl_flags = CRYPTO_LIMITED_HASH_SUPPORT;
+	if (fl.fl_list.prov_is_hash_limited) {
+		pslot->sl_flags |= CRYPTO_LIMITED_HASH_SUPPORT;
+		pslot->sl_hash_max_inlen = fl.fl_list.prov_hash_limit;
+	}
+
+	if (fl.fl_list.prov_is_hmac_limited) {
+		pslot->sl_flags |= CRYPTO_LIMITED_HMAC_SUPPORT;
+		pslot->sl_hmac_max_inlen = fl.fl_list.prov_hmac_limit;
+	}
+
+	if (fl.fl_list.prov_is_hash_limited | fl.fl_list.prov_is_hmac_limited) {
 		pslot->sl_threshold = fl.fl_list.prov_hash_threshold;
-		pslot->sl_max_inlen = fl.fl_list.prov_hash_limit;
 	}
 
 	pslot->total_threshold_count = fl.fl_list.total_threshold_count;
--- a/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelVerify.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/lib/pkcs11/pkcs11_kernel/common/kernelVerify.c	Fri Dec 11 10:41:17 2009 -0800
@@ -140,7 +140,7 @@
 		rv = crypto2pkcs11_error_number(verify_init.vi_return_value);
 	}
 
-	if (rv == CKR_OK && SLOT_HAS_LIMITED_HASH(session_p) &&
+	if (rv == CKR_OK && SLOT_HAS_LIMITED_HMAC(session_p) &&
 	    is_hmac(pMechanism->mechanism)) {
 		if (key_p->is_lib_obj && key_p->class == CKO_SECRET_KEY) {
 			(void) pthread_mutex_lock(&session_p->session_mutex);
@@ -220,7 +220,7 @@
 
 	if (session_p->verify.flags & CRYPTO_EMULATE) {
 		if ((ulDataLen < SLOT_THRESHOLD(session_p)) ||
-		    (ulDataLen > SLOT_MAX_INDATA_LEN(session_p))) {
+		    (ulDataLen > SLOT_HMAC_MAX_INDATA_LEN(session_p))) {
 			session_p->verify.flags |= CRYPTO_EMULATE_USING_SW;
 			(void) pthread_mutex_unlock(&session_p->session_mutex);
 			ses_lock_held = B_FALSE;
--- a/usr/src/pkgdefs/SUNWfmd/prototype_com	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWfmd/prototype_com	Fri Dec 11 10:41:17 2009 -0800
@@ -63,6 +63,7 @@
 f none usr/lib/fm/eft/neptune_xfp.eft 444 root bin
 f none usr/lib/fm/eft/pci.eft 444 root bin
 f none usr/lib/fm/eft/pciex.eft 444 root bin
+f none usr/lib/fm/eft/pciexrc.eft 444 root bin
 f none usr/lib/fm/eft/sensor.eft 444 root bin
 d none usr/lib/fm/fmd 755 root bin
 f none usr/lib/fm/fmd/fmd 555 root bin
--- a/usr/src/pkgdefs/SUNWfmd/prototype_sparc	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWfmd/prototype_sparc	Fri Dec 11 10:41:17 2009 -0800
@@ -131,6 +131,7 @@
 f none usr/platform/sun4v/lib/fm/eft/zambezi.eft 444 root bin
 f none usr/platform/sun4v/lib/fm/eft/gcpu.eft 444 root bin
 f none usr/platform/sun4v/lib/fm/eft/gmem.eft 444 root bin
+f none usr/platform/sun4v/lib/fm/eft/sp.eft 444 root bin
 d none usr/platform/sun4v/lib/fm/fmd 755 root bin
 d none usr/platform/sun4v/lib/fm/fmd/plugins 755 root bin
 f none usr/platform/sun4v/lib/fm/fmd/plugins/cpumem-diagnosis.so 555 root bin
--- a/usr/src/pkgdefs/SUNWiopc.v/postinstall	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWiopc.v/postinstall	Fri Dec 11 10:41:17 2009 -0800
@@ -21,11 +21,9 @@
 #
 
 #
-# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-# ident	"%Z%%M%	%I%	%E% SMI"
-#
  
 PATH="/usr/bin:/usr/sbin:${PATH}"
 export PATH
@@ -44,4 +42,7 @@
 installed n2piupc || add_drv -b ${BASEDIR} -m '* 0666 root sys' \
 	-i '"SUNW,n2piu-pr" "SUNW,vfpiu-pr"' n2piupc || EXIT=1
 
+installed iospc || add_drv -b ${BASEDIR} -m '* 0666 root sys' \
+	-i '"SUNW,ktios-pr" "SUNW,rfios-pr"' iospc || EXIT=1
+
 exit $EXIT
--- a/usr/src/pkgdefs/SUNWiopc.v/preremove	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWiopc.v/preremove	Fri Dec 11 10:41:17 2009 -0800
@@ -21,11 +21,9 @@
 #
 
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-# ident	"%Z%%M%	%I%	%E% SMI"
-#
  
 PATH="/usr/bin:/usr/sbin:${PATH}"
 export PATH
@@ -49,4 +47,9 @@
 	rem_drv -b ${BASEDIR} n2piupc || EXIT=1
 fi
 
+if installed iospc
+then
+	rem_drv -b ${BASEDIR} iospc || EXIT=1
+fi
+
 exit $EXIT
--- a/usr/src/pkgdefs/SUNWiopc.v/prototype_sparc	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWiopc.v/prototype_sparc	Fri Dec 11 10:41:17 2009 -0800
@@ -20,11 +20,9 @@
 #
 
 #
-# Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-# ident	"%Z%%M%	%I%	%E% SMI"
-#
 # This required package information file contains a list of package contents.
 # The 'pkgmk' command uses this file to identify the contents of a package
 # and their location on the development machine when building the package.
@@ -57,4 +55,6 @@
 f none platform/sun4v/kernel/drv/fpc.conf 644 root sys
 f none platform/sun4v/kernel/drv/sparcv9/fpc 755 root sys
 f none platform/sun4v/kernel/drv/n2piupc.conf 644 root sys
+f none platform/sun4v/kernel/drv/sparcv9/iospc 755 root sys
+f none platform/sun4v/kernel/drv/iospc.conf 644 root sys
 f none platform/sun4v/kernel/drv/sparcv9/n2piupc 755 root sys
--- a/usr/src/pkgdefs/SUNWn2cp.v/postinstall	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWn2cp.v/postinstall	Fri Dec 11 10:41:17 2009 -0800
@@ -21,7 +21,7 @@
 #
 
 #
-# Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 
@@ -43,7 +43,7 @@
 grep -w n2cp ${NAMEMAJOR} > /dev/null 2>&1
 if [ $? -ne 0 ]
 then
-    $ADD_DRV -i '"SUNW,n2-cwq" "SUNW,vf-cwq"' n2cp || exit 1
+    $ADD_DRV -i '"SUNW,n2-cwq" "SUNW,vf-cwq" "SUNW,kt-cwq"' n2cp || exit 1
 fi
 
 exit 0
--- a/usr/src/pkgdefs/SUNWnxge.v/postinstall	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWnxge.v/postinstall	Fri Dec 11 10:41:17 2009 -0800
@@ -20,7 +20,7 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
 
@@ -33,7 +33,7 @@
 # Driver info 
 # 
 DRV=nxge
-DRVALIAS=" \"pciex108e,abcd\" \"SUNW,niusl\" \"SUNW,rock-pciex108e,abcd\""
+DRVALIAS=" \"pciex108e,abcd\" \"SUNW,niusl\" \"SUNW,rock-pciex108e,abcd\" \"SUNW,niusl-kt\""
 
 DRVPERM='* 0600 root sys'
 # POLICY='read_priv_set=net_rawaccess write_priv_set=net_rawaccess'
--- a/usr/src/pkgdefs/SUNWust2.v/prototype_com	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/pkgdefs/SUNWust2.v/prototype_com	Fri Dec 11 10:41:17 2009 -0800
@@ -19,11 +19,9 @@
 # CDDL HEADER END
 #
 #
-# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 #
-# ident	"%Z%%M%	%I%	%E% SMI"
-#
 # This required package information file contains a list of package contents.
 # The 'pkgmk' command uses this file to identify the contents of a package
 # and their location on the development machine when building the package.
@@ -49,9 +47,11 @@
 d none platform/sun4v/kernel 755 root sys
 d none platform/sun4v/kernel/cpu 755 root sys
 d none platform/sun4v/kernel/cpu/sparcv9 755 root sys
+f none platform/sun4v/kernel/cpu/sparcv9/SUNW,UltraSPARC-KT 755 root sys
 f none platform/sun4v/kernel/cpu/sparcv9/SUNW,UltraSPARC-T2 755 root sys
 f none platform/sun4v/kernel/cpu/sparcv9/SUNW,UltraSPARC-T2+ 755 root sys
 d none platform/sun4v/kernel/pcbe 755 root sys
 d none platform/sun4v/kernel/pcbe/sparcv9 755 root sys
+f none platform/sun4v/kernel/pcbe/sparcv9/pcbe.SUNW,UltraSPARC-KT 755 root sys
 f none platform/sun4v/kernel/pcbe/sparcv9/pcbe.SUNW,UltraSPARC-T2 755 root sys
 f none platform/sun4v/kernel/pcbe/sparcv9/pcbe.SUNW,UltraSPARC-T2+ 755 root sys
--- a/usr/src/uts/common/crypto/api/kcf_mac.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/crypto/api/kcf_mac.c	Fri Dec 11 10:41:17 2009 -0800
@@ -180,8 +180,8 @@
 		KCF_PROV_INCRSTATS(pd, error);
 	} else {
 		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
-		    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
-		    (data->cd_length > pd->pd_hash_limit)) {
+		    (pd->pd_flags & CRYPTO_HMAC_NO_UPDATE) &&
+		    (data->cd_length > pd->pd_hmac_limit)) {
 			/*
 			 * XXX - We need a check to see if this is indeed
 			 * a HMAC. So far, all kernel clients use
@@ -305,8 +305,8 @@
 		KCF_PROV_INCRSTATS(pd, error);
 	} else {
 		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
-		    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
-		    (data->cd_length > pd->pd_hash_limit)) {
+		    (pd->pd_flags & CRYPTO_HMAC_NO_UPDATE) &&
+		    (data->cd_length > pd->pd_hmac_limit)) {
 			/* see comments in crypto_mac() */
 			error = CRYPTO_BUFFER_TOO_BIG;
 		} else {
@@ -475,7 +475,7 @@
 	}
 
 	if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
-	    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
+	    (pd->pd_flags & CRYPTO_HMAC_NO_UPDATE)) {
 		/*
 		 * The hardware provider has limited HMAC support.
 		 * So, we fallback early here to using a software provider.
--- a/usr/src/uts/common/crypto/io/crypto.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/crypto/io/crypto.c	Fri Dec 11 10:41:17 2009 -0800
@@ -800,18 +800,28 @@
 			fl->fl_set_pin = B_TRUE;
 	}
 
-	fl->prov_is_limited = pd->pd_flags & CRYPTO_HASH_NO_UPDATE;
-	if (fl->prov_is_limited) {
+	fl->prov_is_hash_limited = pd->pd_flags & CRYPTO_HASH_NO_UPDATE;
+	if (fl->prov_is_hash_limited) {
+		fl->prov_hash_limit = min(pd->pd_hash_limit,
+		    min(CRYPTO_MAX_BUFFER_LEN,
+		    curproc->p_task->tk_proj->kpj_data.kpd_crypto_mem_ctl));
+	}
+
+	fl->prov_is_hmac_limited = pd->pd_flags & CRYPTO_HMAC_NO_UPDATE;
+	if (fl->prov_is_hmac_limited) {
+		fl->prov_hmac_limit = min(pd->pd_hmac_limit,
+		    min(CRYPTO_MAX_BUFFER_LEN,
+		    curproc->p_task->tk_proj->kpj_data.kpd_crypto_mem_ctl));
+	}
+
+	if (fl->prov_is_hash_limited || fl->prov_is_hmac_limited) {
 		/*
-		 * XXX - The threshold should ideally be per hash
+		 * XXX - The threshold should ideally be per hash/HMAC
 		 * mechanism. For now, we use the same value for all
-		 * hash mechanisms. Empirical evidence suggests this
+		 * hash/HMAC mechanisms. Empirical evidence suggests this
 		 * is fine.
 		 */
 		fl->prov_hash_threshold = kcf_md5_threshold;
-		fl->prov_hash_limit = min(pd->pd_hash_limit,
-		    min(CRYPTO_MAX_BUFFER_LEN,
-		    curproc->p_task->tk_proj->kpj_data.kpd_crypto_mem_ctl));
 	}
 
 	fl->total_threshold_count = MAX_NUM_THRESHOLD;
--- a/usr/src/uts/common/crypto/io/dprov.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/crypto/io/dprov.c	Fri Dec 11 10:41:17 2009 -0800
@@ -1792,21 +1792,9 @@
 	    DDI_PROP_DONTPASS, "max_digest_sz", INT_MAX);
 	if (dprov_max_digestsz != INT_MAX && dprov_max_digestsz != 0 &&
 	    dprov_max_digestsz != DDI_PROP_NOT_FOUND) {
-		int i, nmechs;
-
 		dprov_no_multipart = B_TRUE;
-		dprov_prov_info.pi_flags |= CRYPTO_HASH_NO_UPDATE;
-
-		/* Set cm_max_input_length for all hash mechs */
-		nmechs = sizeof (dprov_mech_info_tab) /
-		    sizeof (crypto_mech_info_t);
-		for (i = 0; i < nmechs; i++) {
-			if (dprov_mech_info_tab[i].cm_func_group_mask &
-			    CRYPTO_FG_DIGEST) {
-				dprov_mech_info_tab[i].cm_max_input_length =
-				    dprov_max_digestsz;
-			}
-		}
+		dprov_prov_info.pi_flags |=
+		    (CRYPTO_HASH_NO_UPDATE | CRYPTO_HMAC_NO_UPDATE);
 	}
 
 	/* create taskq */
@@ -8557,6 +8545,8 @@
 		if (softc->ds_token_initialized)
 			ext_info->ei_flags |= CRYPTO_EXTF_TOKEN_INITIALIZED;
 
+		ext_info->ei_hash_max_input_len = dprov_max_digestsz;
+		ext_info->ei_hmac_max_input_len = dprov_max_digestsz;
 		error = CRYPTO_SUCCESS;
 		break;
 	}
--- a/usr/src/uts/common/crypto/spi/kcf_spi.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/crypto/spi/kcf_spi.c	Fri Dec 11 10:41:17 2009 -0800
@@ -276,7 +276,7 @@
 	else
 		prov_desc->pd_taskq = NULL;
 
-	/* no kernel session to logical providers */
+	/* no kernel session to logical providers and no pd_flags  */
 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
 		/*
 		 * Open a session for session-oriented providers. This session
@@ -293,11 +293,38 @@
 			    CRYPTO_USER, NULL, 0, prov_desc);
 			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
 			    B_FALSE);
+			if (ret != CRYPTO_SUCCESS)
+				goto undo_then_bail;
+		}
 
-			if (ret != CRYPTO_SUCCESS) {
-				undo_register_provider(prov_desc, B_TRUE);
-				ret = CRYPTO_FAILED;
-				goto bail;
+		/*
+		 * Get the value for the maximum input length allowed if
+		 * CRYPTO_HASH_NO_UPDATE or CRYPTO_HMAC_NO_UPDATE is specified.
+		 */
+		if (prov_desc->pd_flags &
+		    (CRYPTO_HASH_NO_UPDATE | CRYPTO_HMAC_NO_UPDATE)) {
+			kcf_req_params_t params;
+			crypto_provider_ext_info_t ext_info;
+
+			if (KCF_PROV_PROVMGMT_OPS(prov_desc) == NULL)
+				goto undo_then_bail;
+
+			bzero(&ext_info, sizeof (ext_info));
+			KCF_WRAP_PROVMGMT_OPS_PARAMS(&params,
+			    KCF_OP_MGMT_EXTINFO,
+			    0, NULL, 0, NULL, 0, NULL, &ext_info, prov_desc);
+			ret = kcf_submit_request(prov_desc, NULL, NULL,
+			    &params, B_FALSE);
+			if (ret != CRYPTO_SUCCESS)
+				goto undo_then_bail;
+
+			if (prov_desc->pd_flags & CRYPTO_HASH_NO_UPDATE) {
+				prov_desc->pd_hash_limit =
+				    ext_info.ei_hash_max_input_len;
+			}
+			if (prov_desc->pd_flags & CRYPTO_HMAC_NO_UPDATE) {
+				prov_desc->pd_hmac_limit =
+				    ext_info.ei_hmac_max_input_len;
 			}
 		}
 	}
@@ -380,8 +407,12 @@
 
 exit:
 	*handle = prov_desc->pd_kcf_prov_handle;
-	ret = CRYPTO_SUCCESS;
+	KCF_PROV_REFRELE(prov_desc);
+	return (CRYPTO_SUCCESS);
 
+undo_then_bail:
+	undo_register_provider(prov_desc, B_TRUE);
+	ret = CRYPTO_FAILED;
 bail:
 	KCF_PROV_REFRELE(prov_desc);
 	return (ret);
@@ -746,24 +777,6 @@
 			break;
 		}
 
-		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
-		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
-			/*
-			 * We ask the provider to specify the limit
-			 * per hash mechanism. But, in practice, a
-			 * hardware limitation means all hash mechanisms
-			 * will have the same maximum size allowed for
-			 * input data. So, we make it a per provider
-			 * limit to keep it simple.
-			 */
-			if (mi->cm_max_input_length == 0) {
-				err = CRYPTO_ARGUMENTS_BAD;
-				break;
-			} else {
-				desc->pd_hash_limit = mi->cm_max_input_length;
-			}
-		}
-
 		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
 		    KCF_SUCCESS)
 			break;
--- a/usr/src/uts/common/io/nxge/npi/npi_fflp.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/npi/npi_fflp.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,13 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <npi_fflp.h>
 #include <nxge_common.h>
 
@@ -71,6 +68,8 @@
 	((class == TCAM_CLASS_ETYPE_1) || (class == TCAM_CLASS_ETYPE_2))
 #define	TCAM_L3_CLASS_VALID(class) \
 	((class >= TCAM_CLASS_IP_USER_4) && (class <= TCAM_CLASS_SCTP_IPV6))
+#define	TCAM_L3_CLASS_VALID_RFNL(class) \
+	((TCAM_L3_CLASS_VALID(class)) || ((class) == TCAM_CLASS_IPV6_FRAG))
 #define	TCAM_CLASS_VALID(class) \
 	((class >= TCAM_CLASS_ETYPE_1) && (class <= TCAM_CLASS_RARP))
 
@@ -1973,6 +1972,111 @@
 }
 
 /*
+ * npi_fflp_cfg_ip_usr_cls_set_iptun()
+ * Configures the TCAM user configurable IP classes. This function sets the
+ * new fields that were added for IP tunneling support
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *      class:       IP Class  class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *	l4b0_val	value of the first L4 byte to be compared
+ *	l4b0_msk	mask to apply to compare byte 0 of L4
+ *	l4b23_val	values of L4 bytes 2 and 3 to compare
+ *	l4b23_sel	set to 1 to compare L4 bytes 2 and 3.
+ * by default, the class is disabled until explicitly enabled
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_usr_cls_set_iptun(npi_handle_t handle, tcam_class_t class,
+			    uint8_t l4b0_val, uint8_t l4b0_msk,
+			    uint16_t l4b23_val, uint8_t l4b23_sel)
+{
+	uint64_t offset, val;
+	tcam_class_prg_ip_t ip_cls_cfg;
+
+	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
+	if (!TCAM_L3_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_fflp_cfg_ip_usr_cls_set_iptun:"
+		    " Invalid class %d \n",
+		    class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_TCAM_CLASS_OFFSET(class);
+	REG_PIO_READ64(handle, offset, &ip_cls_cfg.value);
+
+	val = 1;
+	ip_cls_cfg.value |= (val << L3_UCLS_L4_MODE_SH);
+	val = l4b0_val;
+	ip_cls_cfg.value |= (val << L3_UCLS_L4B0_VAL_SH);
+	val = l4b0_msk;
+	ip_cls_cfg.value |= (val << L3_UCLS_L4B0_MASK_SH);
+	val = l4b23_sel;
+	ip_cls_cfg.value |= (val << L3_UCLS_L4B23_SEL_SH);
+	val = l4b23_val;
+	ip_cls_cfg.value |= (val << L3_UCLS_L4B23_VAL_SH);
+
+	ip_cls_cfg.bits.ldw.valid = 0;
+	REG_PIO_WRITE64(handle, offset, ip_cls_cfg.value);
+	return (NPI_SUCCESS);
+}
+
+/*
+ * npi_fflp_cfg_ip_usr_cls_get_iptun()
+ * Retrieves the IP tunneling related settings for the given TCAM user
+ * configurable IP class.
+ *
+ * Input
+ *      handle:		opaque handle interpreted by the underlying OS
+ *      class:       IP Class  class
+ *		     (TCAM_CLASS_IP_USER_4 <= class <= TCAM_CLASS_IP_USER_7)
+ *	l4b0_val	value of the first L4 byte to be compared
+ *	l4b0_msk	mask to apply to compare byte 0 of L4
+ *	l4b23_val	values of L4 bytes 2 and 3 to compare
+ *	l4b23_sel	set to 1 to compare L4 bytes 2 and 3.
+ * by default, the class is disabled until explicitly enabled
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_usr_cls_get_iptun(npi_handle_t handle, tcam_class_t class,
+			    uint8_t *l4b0_val, uint8_t *l4b0_msk,
+			    uint16_t *l4b23_val, uint8_t *l4b23_sel)
+{
+	uint64_t offset;
+	tcam_class_prg_ip_t ip_cls_cfg;
+
+	ASSERT(TCAM_L3_USR_CLASS_VALID(class));
+	if (!TCAM_L3_USR_CLASS_VALID(class)) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_fflp_cfg_ip_usr_cls_get_iptun:"
+		    " Invalid class %d \n",
+		    class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	offset = GET_TCAM_CLASS_OFFSET(class);
+	REG_PIO_READ64(handle, offset, &ip_cls_cfg.value);
+
+	*l4b0_val = (ip_cls_cfg.value >> L3_UCLS_L4B0_VAL_SH) &
+	    L3_UCLS_L4B0_VAL_MSK;
+	*l4b0_msk = (ip_cls_cfg.value >> L3_UCLS_L4B0_MASK_SH) &
+	    L3_UCLS_L4B0_MASK_MSK;
+	*l4b23_sel = (ip_cls_cfg.value >> L3_UCLS_L4B23_SEL_SH) &
+	    L3_UCLS_L4B23_SEL_MSK;
+	*l4b23_val = (ip_cls_cfg.value >> L3_UCLS_L4B23_VAL_SH) &
+	    L3_UCLS_L4B23_VAL_MSK;
+
+	return (NPI_SUCCESS);
+
+}
+
+/*
  * npi_fflp_cfg_ip_usr_cls_enable()
  * Enable previously configured TCAM user configurable IP classes.
  *
@@ -2280,6 +2384,241 @@
 
 }
 
+/*
+ * npi_fflp_cfg_ip_cls_flow_key_rfnl ()
+ *
+ * Configures the flow key generation for the IP classes
+ * Flow key is used to generate the H1 hash function value
+ * The fields used for the generation are configured using this
+ * NPI function.
+ *
+ * Input
+ *      handle:	opaque handle interpreted by the underlying OS
+ *      l3_class:        IP class to configure flow key generation
+ *      cfg:             Configuration bits:
+ *		     l4_xor_sel:    bit field to select the L4 payload
+ *				    bytes for X-OR to get hash key.
+ *		     use_l4_md:	    Set to 1 for enabling L4-mode.
+ *		     use_sym:	    Set to 1 to use symmetric mode.
+ *                   use_proto:     Use IP proto field
+ *                   use_dport:     use l4 destination port
+ *                   use_sport:     use l4 source port
+ *                   ip_opts_exist: IP Options Present
+ *                   use_daddr:     use ip dest address
+ *                   use_saddr:     use ip source address
+ *                   use_vlan:      use VLAN ID
+ *                   use_l2da:      use L2 Dest MAC Address
+ *                   use_portnum:   use L2 virtual port number
+ *
+ *
+ * Return
+ * NPI success/failure status code
+ */
+npi_status_t
+npi_fflp_cfg_ip_cls_flow_key_rfnl(npi_handle_t handle, tcam_class_t l3_class,
+		flow_key_cfg_t *cfg)
+{
+	uint64_t offset;
+	flow_class_key_ip_t flow_cfg_reg;
+
+	ASSERT(TCAM_L3_CLASS_VALID_RFNL(l3_class));
+	if (!(TCAM_L3_CLASS_VALID_RFNL(l3_class))) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_fflp_cfg_ip_cls_flow_key_rfnl:"
+		    " Invalid class %d \n",
+		    l3_class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	if (l3_class == TCAM_CLASS_IPV6_FRAG) {
+		offset = FFLP_FLOW_KEY_IP6_FRAG_REG;
+	} else {
+		offset = GET_FLOW_KEY_OFFSET(l3_class);
+	}
+
+	flow_cfg_reg.value = 0;
+
+	flow_cfg_reg.bits.ldw.l4_xor = cfg->l4_xor_sel;
+
+	if (cfg->use_l4_md)
+		flow_cfg_reg.bits.ldw.l4_mode = 1;
+
+	if (cfg->use_sym)
+		flow_cfg_reg.bits.ldw.sym = 1;
+
+	if (cfg->use_proto) {
+		flow_cfg_reg.bits.ldw.proto = 1;
+	}
+
+	if (cfg->use_dport) {
+		flow_cfg_reg.bits.ldw.l4_1 = 2;
+		if (cfg->ip_opts_exist)
+			flow_cfg_reg.bits.ldw.l4_1 = 3;
+	}
+
+	if (cfg->use_sport) {
+		flow_cfg_reg.bits.ldw.l4_0 = 2;
+		if (cfg->ip_opts_exist)
+			flow_cfg_reg.bits.ldw.l4_0 = 3;
+	}
+
+	if (cfg->use_daddr) {
+		flow_cfg_reg.bits.ldw.ipda = BIT_ENABLE;
+	}
+
+	if (cfg->use_saddr) {
+		flow_cfg_reg.bits.ldw.ipsa = BIT_ENABLE;
+	}
+
+	if (cfg->use_vlan) {
+		flow_cfg_reg.bits.ldw.vlan = BIT_ENABLE;
+	}
+
+	if (cfg->use_l2da) {
+		flow_cfg_reg.bits.ldw.l2da = BIT_ENABLE;
+	}
+
+	if (cfg->use_portnum) {
+		flow_cfg_reg.bits.ldw.port = BIT_ENABLE;
+	}
+
+	REG_PIO_WRITE64(handle, offset, flow_cfg_reg.value);
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_fflp_cfg_sym_ip_cls_flow_key(npi_handle_t handle, tcam_class_t l3_class,
+		boolean_t enable)
+{
+	uint64_t offset;
+	flow_class_key_ip_t flow_cfg_reg;
+
+	ASSERT(TCAM_L3_CLASS_VALID_RFNL(l3_class));
+	if (!(TCAM_L3_CLASS_VALID_RFNL(l3_class))) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_fflp_cfg_sym_ip_cls_flow_key:"
+		    " Invalid class %d \n",
+		    l3_class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	if (l3_class == TCAM_CLASS_IPV6_FRAG) {
+		offset = FFLP_FLOW_KEY_IP6_FRAG_REG;
+	} else {
+		offset = GET_FLOW_KEY_OFFSET(l3_class);
+	}
+
+	REG_PIO_READ64(handle, offset, &flow_cfg_reg.value);
+
+	if (enable && flow_cfg_reg.bits.ldw.sym == 0) {
+		flow_cfg_reg.bits.ldw.sym = 1;
+		REG_PIO_WRITE64(handle, offset, flow_cfg_reg.value);
+	} else if (!enable && flow_cfg_reg.bits.ldw.sym == 1) {
+		flow_cfg_reg.bits.ldw.sym = 0;
+		REG_PIO_WRITE64(handle, offset, flow_cfg_reg.value);
+	}
+
+	return (NPI_SUCCESS);
+
+}
+
+npi_status_t
+npi_fflp_cfg_ip_cls_flow_key_get_rfnl(npi_handle_t handle,
+				    tcam_class_t l3_class,
+				    flow_key_cfg_t *cfg)
+{
+	uint64_t offset;
+	flow_class_key_ip_t flow_cfg_reg;
+
+	ASSERT(TCAM_L3_CLASS_VALID_RFNL(l3_class));
+	if (!(TCAM_L3_CLASS_VALID_RFNL(l3_class))) {
+		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
+		    " npi_fflp_cfg_ip_cls_flow_key_get_rfnl:"
+		    " Invalid class %d \n",
+		    l3_class));
+		return (NPI_FFLP_TCAM_CLASS_INVALID);
+	}
+
+	if (l3_class == TCAM_CLASS_IPV6_FRAG) {
+		offset = FFLP_FLOW_KEY_IP6_FRAG_REG;
+	} else {
+		offset = GET_FLOW_KEY_OFFSET(l3_class);
+	}
+
+	cfg->l4_xor_sel = 0;
+	cfg->use_l4_md = 0;
+	cfg->use_sym = 0;
+	cfg->use_proto = 0;
+	cfg->use_dport = 0;
+	cfg->use_sport = 0;
+	cfg->ip_opts_exist = 0;
+	cfg->use_daddr = 0;
+	cfg->use_saddr = 0;
+	cfg->use_vlan = 0;
+	cfg->use_l2da = 0;
+	cfg->use_portnum  = 0;
+
+	REG_PIO_READ64(handle, offset, &flow_cfg_reg.value);
+
+	cfg->l4_xor_sel = flow_cfg_reg.bits.ldw.l4_xor;
+
+	if (flow_cfg_reg.bits.ldw.l4_mode)
+		cfg->use_l4_md = 1;
+
+	if (flow_cfg_reg.bits.ldw.sym)
+		cfg->use_sym = 1;
+
+	if (flow_cfg_reg.bits.ldw.proto) {
+		cfg->use_proto = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_1 == 2) {
+		cfg->use_dport = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_1 == 3) {
+		cfg->use_dport = 1;
+		cfg->ip_opts_exist = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_0 == 2) {
+		cfg->use_sport = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l4_0 == 3) {
+		cfg->use_sport = 1;
+		cfg->ip_opts_exist = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.ipda) {
+		cfg->use_daddr = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.ipsa) {
+		cfg->use_saddr = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.vlan) {
+		cfg->use_vlan = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.l2da) {
+		cfg->use_l2da = 1;
+	}
+
+	if (flow_cfg_reg.bits.ldw.port) {
+		cfg->use_portnum = 1;
+	}
+
+	NPI_DEBUG_MSG((handle.function, NPI_FFLP_CTL,
+	    " npi_fflp_cfg_ip_cls_flow_key_get_rfnl %llx \n",
+	    flow_cfg_reg.value));
+
+	return (NPI_SUCCESS);
+
+}
+
 npi_status_t
 npi_fflp_cfg_ip_cls_tcam_key_get(npi_handle_t handle,
 			    tcam_class_t l3_class, tcam_key_cfg_t *cfg)
@@ -2315,7 +2654,7 @@
 	}
 
 	if (tcam_cls_cfg.bits.ldw.tsel) {
-		cfg->lookup_enable	= 1;
+		cfg->lookup_enable = 1;
 	}
 
 	NPI_DEBUG_MSG((handle.function, NPI_CTL,
--- a/usr/src/uts/common/io/nxge/npi/npi_fflp.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/npi/npi_fflp.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _NPI_FFLP_H
 #define	_NPI_FFLP_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -765,6 +763,12 @@
 					uint8_t, uint8_t,
 					uint8_t, uint8_t);
 
+npi_status_t npi_fflp_cfg_ip_usr_cls_set_iptun(npi_handle_t,
+		tcam_class_t, uint8_t, uint8_t, uint16_t, uint8_t);
+
+npi_status_t npi_fflp_cfg_ip_usr_cls_get_iptun(npi_handle_t,
+		tcam_class_t, uint8_t *, uint8_t *, uint16_t *, uint8_t *);
+
 /*
  * npi_fflp_cfg_ip_usr_cls_enable()
  * Enable previously configured TCAM user configurable IP classes.
@@ -860,15 +864,21 @@
 npi_status_t npi_fflp_cfg_ip_cls_flow_key(npi_handle_t,
 			    tcam_class_t, flow_key_cfg_t *);
 
-
-
 npi_status_t npi_fflp_cfg_ip_cls_flow_key_get(npi_handle_t,
 				    tcam_class_t,
 				    flow_key_cfg_t *);
 
+npi_status_t npi_fflp_cfg_ip_cls_flow_key_rfnl(npi_handle_t,
+		tcam_class_t, flow_key_cfg_t *);
+
+npi_status_t npi_fflp_cfg_sym_ip_cls_flow_key(npi_handle_t, tcam_class_t,
+					boolean_t);
+
+npi_status_t npi_fflp_cfg_ip_cls_flow_key_get_rfnl(npi_handle_t,
+			tcam_class_t, flow_key_cfg_t *);
 
 npi_status_t npi_fflp_cfg_ip_cls_tcam_key_get(npi_handle_t,
-				    tcam_class_t, tcam_key_cfg_t *);
+			tcam_class_t, tcam_key_cfg_t *);
 /*
  * npi_fflp_cfg_hash_h1poly()
  * Initializes the H1 hash generation logic.
--- a/usr/src/uts/common/io/nxge/npi/npi_rxdma.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/npi/npi_rxdma.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <npi_rxdma.h>
 #include <npi_rx_rd64.h>
 #include <npi_rx_wr64.h>
@@ -594,7 +592,7 @@
  */
 npi_status_t
 npi_rxdma_cfg_rdc_ring(npi_handle_t handle, uint8_t rdc,
-			    rdc_desc_cfg_t *rdc_desc_cfg)
+			    rdc_desc_cfg_t *rdc_desc_cfg, boolean_t new_off)
 {
 	rbr_cfig_a_t cfga;
 	rbr_cfig_b_t cfgb;
@@ -640,10 +638,38 @@
 	if (rdc_desc_cfg->full_hdr == 1)
 		cfg2.bits.ldw.full_hdr = 1;
 
-	if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
-		cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
+	if (new_off) {
+		if (RXDMA_RF_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
+			switch (rdc_desc_cfg->offset) {
+			case SW_OFFSET_NO_OFFSET:
+			case SW_OFFSET_64:
+			case SW_OFFSET_128:
+			case SW_OFFSET_192:
+				cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
+				cfg2.bits.ldw.offset256 = 0;
+				break;
+			case SW_OFFSET_256:
+			case SW_OFFSET_320:
+			case SW_OFFSET_384:
+			case SW_OFFSET_448:
+				cfg2.bits.ldw.offset =
+				    rdc_desc_cfg->offset & 0x3;
+				cfg2.bits.ldw.offset256 = 1;
+				break;
+			default:
+				cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
+				cfg2.bits.ldw.offset256 = 0;
+			}
+		} else {
+			cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
+			cfg2.bits.ldw.offset256 = 0;
+		}
 	} else {
-		cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
+		if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
+			cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
+		} else {
+			cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
+		}
 	}
 
 		/* rbr config */
--- a/usr/src/uts/common/io/nxge/npi/npi_rxdma.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/npi/npi_rxdma.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _NPI_RXDMA_H
 #define	_NPI_RXDMA_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -103,6 +101,16 @@
 	    (offset == SW_OFFSET_64) || \
 	    (offset == SW_OFFSET_128))
 
+#define	RXDMA_RF_BUFF_OFFSET_VALID(offset) \
+	((offset == SW_OFFSET_NO_OFFSET) || \
+	    (offset == SW_OFFSET_64) || \
+	    (offset == SW_OFFSET_128) || \
+	    (offset == SW_OFFSET_192) || \
+	    (offset == SW_OFFSET_256) || \
+	    (offset == SW_OFFSET_320) || \
+	    (offset == SW_OFFSET_384) || \
+	    (offset == SW_OFFSET_448))
+
 
 #define	RXDMA_RCR_TO_VALID(tov) ((tov) && (tov < 64))
 #define	RXDMA_RCR_THRESH_VALID(thresh) ((thresh) && (thresh < 512))
@@ -548,7 +556,7 @@
 
 
 npi_status_t npi_rxdma_cfg_rdc_ring(npi_handle_t, uint8_t,
-				    rdc_desc_cfg_t *);
+				    rdc_desc_cfg_t *, boolean_t);
 
 
 
--- a/usr/src/uts/common/io/nxge/nxge_fflp.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_fflp.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <npi_fflp.h>
 #include <npi_mac.h>
 #include <nxge_defs.h>
@@ -53,9 +51,14 @@
 	tcam_entry_t *);
 static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
 	tcam_entry_t *);
-static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, intptr_t);
-static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, intptr_t);
-static tcam_location_t nxge_get_tcam_location(p_nxge_t, uint8_t);
+static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, uint64_t);
+static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, uint64_t);
+static uint16_t nxge_tcam_get_index(p_nxge_t, uint16_t);
+static uint32_t nxge_tcam_cls_to_flow(uint32_t);
+static uint8_t nxge_iptun_pkt_type_to_pid(uint8_t);
+static npi_status_t nxge_set_iptun_usr_cls_reg(p_nxge_t, uint64_t,
+					iptun_cfg_t *);
+static boolean_t nxge_is_iptun_cls_present(p_nxge_t, uint8_t, int *);
 
 /*
  * functions used outside this file
@@ -68,6 +71,17 @@
 nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
 nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
 	uint32_t *, uint16_t *);
+int nxge_get_valid_tcam_cnt(p_nxge_t);
+void nxge_get_tcam_entry_all(p_nxge_t, rx_class_cfg_t *);
+void nxge_get_tcam_entry(p_nxge_t, flow_resource_t *);
+void nxge_del_tcam_entry(p_nxge_t, uint32_t);
+void nxge_add_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t *);
+void nxge_cfg_iptun_hash(p_nxge_t, iptun_cfg_t *, uint8_t);
+void nxge_del_iptun_class(p_nxge_t, uint8_t);
+void nxge_get_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t);
+void nxge_set_ip_cls_sym(p_nxge_t, uint8_t, uint8_t);
+void nxge_get_ip_cls_sym(p_nxge_t, uint8_t, uint8_t *);
+
 
 nxge_status_t
 nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
@@ -812,7 +826,6 @@
 nxge_status_t
 nxge_classify_init_sw(p_nxge_t nxgep)
 {
-	int alloc_size;
 	nxge_classify_t *classify_ptr;
 
 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
@@ -823,19 +836,10 @@
 		    "nxge_classify_init_sw already init"));
 		return (NXGE_OK);
 	}
-	/* Init SW structures */
-	classify_ptr->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
-
-	/* init data structures, based on HW type */
-	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
-		classify_ptr->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
-		/*
-		 * check if fcram based classification is required and init the
-		 * flow storage
-		 */
-	}
-	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
-	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, NULL);
+
+	classify_ptr->tcam_size = nxgep->nxge_hw_p->tcam_size / nxgep->nports;
+	classify_ptr->tcam_entries = (tcam_flow_spec_t *)nxgep->nxge_hw_p->tcam;
+	classify_ptr->tcam_top = nxgep->function_num;
 
 	/* Init defaults */
 	/*
@@ -855,52 +859,13 @@
 nxge_status_t
 nxge_classify_exit_sw(p_nxge_t nxgep)
 {
-	int alloc_size;
-	nxge_classify_t *classify_ptr;
-	int fsize;
-
 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
-	classify_ptr = &nxgep->classifier;
-
-	fsize = sizeof (tcam_flow_spec_t);
-	if (classify_ptr->tcam_entries) {
-		alloc_size = fsize * classify_ptr->tcam_size;
-		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
-	}
 	nxgep->classifier.state = NULL;
-
 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
 	return (NXGE_OK);
 }
 
 /*
- * Figures out the location where the TCAM entry is
- * to be inserted.
- *
- * The current implementation is just a place holder and it
- * returns the next tcam location.
- * The real location determining algorithm would consider
- * the priority, partition etc ... before deciding which
- * location to insert.
- *
- */
-
-/* ARGSUSED */
-static tcam_location_t
-nxge_get_tcam_location(p_nxge_t nxgep, uint8_t class)
-{
-	tcam_location_t location;
-
-	location = nxgep->classifier.tcam_location;
-	nxgep->classifier.tcam_location = (location + nxgep->nports) %
-	    nxgep->classifier.tcam_size;
-	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
-	    "nxge_get_tcam_location: location %d next %d \n",
-	    location, nxgep->classifier.tcam_location));
-	return (location);
-}
-
-/*
  * Figures out the RDC Group for the entry
  *
  * The current implementation is just a place holder and it
@@ -912,7 +877,7 @@
 
 /* ARGSUSED */
 static uint8_t
-nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
+nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
 {
 	int use_port_rdc_grp = 0;
 	uint8_t rdc_grp = 0;
@@ -933,7 +898,7 @@
 
 /* ARGSUSED */
 static uint8_t
-nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
+nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
 {
 	return ((uint8_t)cookie);
 }
@@ -962,6 +927,8 @@
 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
 	    tcam_ptr->ip4_proto_mask,
 	    IPPROTO_UDP);
+	tcam_ptr->ip4_tos_key = fspec_key->tos;
+	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
 }
 
 static void
@@ -992,6 +959,8 @@
 	    fspec_key->pdst, fspec_key->psrc);
 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
 	    fspec_mask->pdst, fspec_mask->psrc);
+	tcam_ptr->ip6_tos_key = fspec_key->tos;
+	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
 }
 
 /* ARGSUSED */
@@ -1017,6 +986,8 @@
 	    tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
 	    tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
+	tcam_ptr->ip4_tos_key = fspec_key->tos;
+	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
 }
 
 /* ARGSUSED */
@@ -1042,6 +1013,8 @@
 	    fspec_key->pdst, fspec_key->psrc);
 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
 	    fspec_mask->pdst, fspec_mask->psrc);
+	tcam_ptr->ip4_tos_key = fspec_key->tos;
+	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
 }
 
 static void
@@ -1073,6 +1046,8 @@
 	    fspec_key->pdst, fspec_key->psrc);
 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
 	    fspec_mask->pdst, fspec_mask->psrc);
+	tcam_ptr->ip6_tos_key = fspec_key->tos;
+	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
 }
 
 static void
@@ -1104,8 +1079,113 @@
 	    fspec_key->pdst, fspec_key->psrc);
 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
 	    fspec_mask->pdst, fspec_mask->psrc);
+	tcam_ptr->ip6_tos_key = fspec_key->tos;
+	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
 }
 
+/* ARGSUSED */
+static void
+nxge_fill_tcam_entry_ah_esp(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	ahip4_spec_t *fspec_key;
+	ahip4_spec_t *fspec_mask;
+
+	fspec_key = (ahip4_spec_t *)&flow_spec->uh.ahip4spec;
+	fspec_mask = (ahip4_spec_t *)&flow_spec->um.ahip4spec;
+
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
+	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
+
+	tcam_ptr->ip4_port_key = fspec_key->spi;
+	tcam_ptr->ip4_port_mask = fspec_mask->spi;
+
+	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
+	    tcam_ptr->ip4_class_mask,
+	    TCAM_CLASS_AH_ESP_IPV4);
+
+	if (flow_spec->flow_type == FSPEC_AHIP4) {
+		TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
+		    tcam_ptr->ip4_proto_mask, IPPROTO_AH);
+	} else {
+		TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
+		    tcam_ptr->ip4_proto_mask, IPPROTO_ESP);
+	}
+	tcam_ptr->ip4_tos_key = fspec_key->tos;
+	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
+}
+
+static void
+nxge_fill_tcam_entry_ah_esp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr)
+{
+	ahip6_spec_t *fspec_key;
+	ahip6_spec_t *fspec_mask;
+	p_nxge_class_pt_cfg_t p_class_cfgp;
+
+	fspec_key = (ahip6_spec_t *)&flow_spec->uh.ahip6spec;
+	fspec_mask = (ahip6_spec_t *)&flow_spec->um.ahip6spec;
+
+	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
+	if (p_class_cfgp->class_cfg[TCAM_CLASS_AH_ESP_IPV6] &
+	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
+	} else {
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
+		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
+	}
+
+	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
+	    tcam_ptr->ip6_class_mask, TCAM_CLASS_AH_ESP_IPV6);
+
+	if (flow_spec->flow_type == FSPEC_AHIP6) {
+		TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
+		    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_AH);
+	} else {
+		TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
+		    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_ESP);
+	}
+	tcam_ptr->ip6_port_key = fspec_key->spi;
+	tcam_ptr->ip6_port_mask = fspec_mask->spi;
+	tcam_ptr->ip6_tos_key = fspec_key->tos;
+	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
+}
+
+/* ARGSUSED */
+static void
+nxge_fill_tcam_entry_ip_usr(p_nxge_t nxgep, flow_spec_t *flow_spec,
+	tcam_entry_t *tcam_ptr, tcam_class_t class)
+{
+	ip_user_spec_t *fspec_key;
+	ip_user_spec_t *fspec_mask;
+
+	fspec_key = (ip_user_spec_t *)&flow_spec->uh.ip_usr_spec;
+	fspec_mask = (ip_user_spec_t *)&flow_spec->um.ip_usr_spec;
+
+	if (fspec_key->ip_ver == FSPEC_IP4) {
+		TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
+		TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
+		TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
+		TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
+
+		tcam_ptr->ip4_port_key = fspec_key->l4_4_bytes;
+		tcam_ptr->ip4_port_mask = fspec_mask->l4_4_bytes;
+
+		TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
+		    tcam_ptr->ip4_class_mask, class);
+
+		tcam_ptr->ip4_proto_key = fspec_key->proto;
+		tcam_ptr->ip4_proto_mask = fspec_mask->proto;
+
+		tcam_ptr->ip4_tos_key = fspec_key->tos;
+		tcam_ptr->ip4_tos_mask = fspec_mask->tos;
+	}
+}
+
+
 nxge_status_t
 nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
 	uint32_t *H1, uint16_t *H2)
@@ -1192,14 +1272,15 @@
 nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
 {
 	npi_handle_t handle;
-	intptr_t channel_cookie;
-	intptr_t flow_cookie;
+	uint64_t channel_cookie;
+	uint64_t flow_cookie;
 	flow_spec_t *flow_spec;
 	npi_status_t rs = NPI_SUCCESS;
 	tcam_entry_t tcam_ptr;
-	tcam_location_t location = 0;
+	tcam_location_t location;
 	uint8_t offset, rdc_grp;
 	p_nxge_hw_list_t hw_p;
+	uint64_t class;
 
 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
 	handle = nxgep->npi_reg_handle;
@@ -1208,12 +1289,110 @@
 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
 	flow_cookie = flow_res->flow_cookie;
 	channel_cookie = flow_res->channel_cookie;
+	location = (tcam_location_t)nxge_tcam_get_index(nxgep,
+	    (uint16_t)flow_res->location);
+
+	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    " nxge_add_tcam_entry: common hardware not set",
+		    nxgep->niu_type));
+		return (NXGE_ERROR);
+	}
+
+	if (flow_spec->flow_type == FSPEC_IP_USR) {
+		int i;
+		int add_usr_cls = 0;
+		int ipv6 = 0;
+		ip_user_spec_t *uspec = &flow_spec->uh.ip_usr_spec;
+		ip_user_spec_t *umask = &flow_spec->um.ip_usr_spec;
+		nxge_usr_l3_cls_t *l3_ucls_p;
+
+		MUTEX_ENTER(&hw_p->nxge_tcam_lock);
+
+		for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
+			l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
+			if (l3_ucls_p->valid && l3_ucls_p->tcam_ref_cnt) {
+				if (uspec->proto == l3_ucls_p->pid) {
+					class = l3_ucls_p->cls;
+					l3_ucls_p->tcam_ref_cnt++;
+					add_usr_cls = 1;
+					break;
+				}
+			} else if (l3_ucls_p->valid == 0) {
+				/* Program new user IP class */
+				switch (i) {
+				case 0:
+					class = TCAM_CLASS_IP_USER_4;
+					break;
+				case 1:
+					class = TCAM_CLASS_IP_USER_5;
+					break;
+				case 2:
+					class = TCAM_CLASS_IP_USER_6;
+					break;
+				case 3:
+					class = TCAM_CLASS_IP_USER_7;
+					break;
+				default:
+					break;
+				}
+				if (uspec->ip_ver == FSPEC_IP6)
+					ipv6 = 1;
+				rs = npi_fflp_cfg_ip_usr_cls_set(handle,
+				    (tcam_class_t)class, uspec->tos,
+				    umask->tos, uspec->proto, ipv6);
+				if (rs != NPI_SUCCESS)
+					goto fail;
+
+				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
+				    (tcam_class_t)class);
+				if (rs != NPI_SUCCESS)
+					goto fail;
+
+				l3_ucls_p->cls = class;
+				l3_ucls_p->pid = uspec->proto;
+				l3_ucls_p->tcam_ref_cnt++;
+				l3_ucls_p->valid = 1;
+				add_usr_cls = 1;
+				break;
+			} else if (l3_ucls_p->tcam_ref_cnt == 0 &&
+			    uspec->proto == l3_ucls_p->pid) {
+				/*
+				 * The class has already been programmed,
+				 * probably for flow hash
+				 */
+				class = l3_ucls_p->cls;
+				if (uspec->ip_ver == FSPEC_IP6)
+					ipv6 = 1;
+				rs = npi_fflp_cfg_ip_usr_cls_set(handle,
+				    (tcam_class_t)class, uspec->tos,
+				    umask->tos, uspec->proto, ipv6);
+				if (rs != NPI_SUCCESS)
+					goto fail;
+
+				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
+				    (tcam_class_t)class);
+				if (rs != NPI_SUCCESS)
+					goto fail;
+
+				l3_ucls_p->pid = uspec->proto;
+				l3_ucls_p->tcam_ref_cnt++;
+				add_usr_cls = 1;
+				break;
+			}
+		}
+		if (!add_usr_cls) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "nxge_add_tcam_entry: Could not find/insert class"
+			    "for pid %d", uspec->proto));
+			goto fail;
+		}
+		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+	}
 
 	switch (flow_spec->flow_type) {
 	case FSPEC_TCPIP4:
 		nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-		    TCAM_CLASS_TCP_IPV4);
 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
 		    flow_cookie);
 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
@@ -1222,8 +1401,6 @@
 
 	case FSPEC_UDPIP4:
 		nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-		    TCAM_CLASS_UDP_IPV4);
 		rdc_grp = nxge_get_rdc_group(nxgep,
 		    TCAM_CLASS_UDP_IPV4,
 		    flow_cookie);
@@ -1235,8 +1412,6 @@
 	case FSPEC_TCPIP6:
 		nxge_fill_tcam_entry_tcp_ipv6(nxgep,
 		    flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-		    TCAM_CLASS_TCP_IPV6);
 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
 		    flow_cookie);
 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
@@ -1246,65 +1421,85 @@
 	case FSPEC_UDPIP6:
 		nxge_fill_tcam_entry_udp_ipv6(nxgep,
 		    flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-		    TCAM_CLASS_UDP_IPV6);
 		rdc_grp = nxge_get_rdc_group(nxgep,
 		    TCAM_CLASS_UDP_IPV6,
-		    channel_cookie);
+		    flow_cookie);
 		offset = nxge_get_rdc_offset(nxgep,
 		    TCAM_CLASS_UDP_IPV6,
-		    flow_cookie);
+		    channel_cookie);
 		break;
 
 	case FSPEC_SCTPIP4:
 		nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-		    TCAM_CLASS_SCTP_IPV4);
 		rdc_grp = nxge_get_rdc_group(nxgep,
 		    TCAM_CLASS_SCTP_IPV4,
-		    channel_cookie);
+		    flow_cookie);
 		offset = nxge_get_rdc_offset(nxgep,
 		    TCAM_CLASS_SCTP_IPV4,
-		    flow_cookie);
+		    channel_cookie);
 		break;
 
 	case FSPEC_SCTPIP6:
 		nxge_fill_tcam_entry_sctp_ipv6(nxgep,
 		    flow_spec, &tcam_ptr);
-		location = nxge_get_tcam_location(nxgep,
-		    TCAM_CLASS_SCTP_IPV4);
 		rdc_grp = nxge_get_rdc_group(nxgep,
 		    TCAM_CLASS_SCTP_IPV6,
+		    flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep,
+		    TCAM_CLASS_SCTP_IPV6,
 		    channel_cookie);
+		break;
+
+	case FSPEC_AHIP4:
+	case FSPEC_ESPIP4:
+		nxge_fill_tcam_entry_ah_esp(nxgep, flow_spec, &tcam_ptr);
+		rdc_grp = nxge_get_rdc_group(nxgep,
+		    TCAM_CLASS_AH_ESP_IPV4,
+		    flow_cookie);
 		offset = nxge_get_rdc_offset(nxgep,
-		    TCAM_CLASS_SCTP_IPV6,
-		    flow_cookie);
+		    TCAM_CLASS_AH_ESP_IPV4,
+		    channel_cookie);
 		break;
 
+	case FSPEC_AHIP6:
+	case FSPEC_ESPIP6:
+		nxge_fill_tcam_entry_ah_esp_ipv6(nxgep,
+		    flow_spec, &tcam_ptr);
+		rdc_grp = nxge_get_rdc_group(nxgep,
+		    TCAM_CLASS_AH_ESP_IPV6,
+		    flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep,
+		    TCAM_CLASS_AH_ESP_IPV6,
+		    channel_cookie);
+		break;
+
+	case FSPEC_IP_USR:
+		nxge_fill_tcam_entry_ip_usr(nxgep, flow_spec, &tcam_ptr,
+		    (tcam_class_t)class);
+		rdc_grp = nxge_get_rdc_group(nxgep,
+		    (tcam_class_t)class, flow_cookie);
+		offset = nxge_get_rdc_offset(nxgep,
+		    (tcam_class_t)class, channel_cookie);
+		break;
 	default:
-		return (NXGE_OK);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_add_tcam_entry: Unknown flow spec 0x%x",
+		    flow_spec->flow_type));
+		return (NXGE_ERROR);
 	}
 
 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
 	    " nxge_add_tcam_entry write"
 	    " for location %d offset %d", location, offset));
 
-	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		    " nxge_add_tcam_entry: common hardware not set",
-		    nxgep->niu_type));
-		return (NXGE_ERROR);
-	}
-
 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
 	rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
 
 	if (rs & NPI_FFLP_ERROR) {
-		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
 		    " nxge_add_tcam_entry write"
 		    " failed for location %d", location));
-		return (NXGE_ERROR | rs);
+		goto fail;
 	}
 
 	tcam_ptr.match_action.value = 0;
@@ -1312,24 +1507,29 @@
 	tcam_ptr.match_action.bits.ldw.offset = offset;
 	tcam_ptr.match_action.bits.ldw.tres =
 	    TRES_TERM_OVRD_L2RDC;
-	if (channel_cookie == -1)
+	if (channel_cookie == NXGE_PKT_DISCARD)
 		tcam_ptr.match_action.bits.ldw.disc = 1;
 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
 	    location, tcam_ptr.match_action.value);
 	if (rs & NPI_FFLP_ERROR) {
-		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
 		    " nxge_add_tcam_entry write"
 		    " failed for ASC RAM location %d", location));
-		return (NXGE_ERROR | rs);
+		goto fail;
 	}
 	bcopy((void *) &tcam_ptr,
 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
 	    sizeof (tcam_entry_t));
+	nxgep->classifier.tcam_entry_cnt++;
+	nxgep->classifier.tcam_entries[location].valid = 1;
 
 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
 	return (NXGE_OK);
+fail:
+	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_tcam_entry FAILED"));
+	return (NXGE_ERROR);
 }
 
 static nxge_status_t
@@ -1389,6 +1589,8 @@
 	bcopy((void *) &tcam_ptr,
 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
 	    sizeof (tcam_entry_t));
+	nxgep->classifier.tcam_entry_cnt++;
+	nxgep->classifier.tcam_entries[location].valid = 1;
 	for (class = TCAM_CLASS_TCP_IPV4;
 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
 		class_config = nxgep->class_config.class_cfg[class];
@@ -1922,11 +2124,13 @@
 		return (NXGE_ERROR);
 	}
 
-	status = nxge_tcam_handle_ip_fragment(nxgep);
-	if (status != NXGE_OK) {
-		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-		    "nxge_tcam_handle_ip_fragment failed"));
-		return (NXGE_ERROR);
+	if (nxgep->classifier.fragment_bug == 1) {
+		status = nxge_tcam_handle_ip_fragment(nxgep);
+		if (status != NXGE_OK) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "nxge_tcam_handle_ip_fragment failed"));
+			return (NXGE_ERROR);
+		}
 	}
 
 	nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
@@ -2060,3 +2264,688 @@
 	statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
 	return (NXGE_OK);
 }
+
+int
+nxge_get_valid_tcam_cnt(p_nxge_t nxgep) {
+	return ((nxgep->classifier.fragment_bug == 1) ?
+		nxgep->classifier.tcam_entry_cnt - 1 :
+		nxgep->classifier.tcam_entry_cnt);
+}
+
+int
+nxge_rxdma_channel_cnt(p_nxge_t nxgep)
+{
+	p_nxge_dma_pt_cfg_t p_dma_cfgp;
+	p_nxge_hw_pt_cfg_t p_cfgp;
+
+	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
+	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+	return (p_cfgp->max_rdcs);
+}
+
+/* ARGSUSED */
+int
+nxge_rxclass_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
+{
+	uint32_t cmd;
+	rx_class_cfg_t *cfg_info = (rx_class_cfg_t *)mp->b_rptr;
+
+	if (nxgep == NULL) {
+		return (-1);
+	}
+	cmd = cfg_info->cmd;
+	switch (cmd) {
+	default:
+		return (-1);
+
+	case NXGE_RX_CLASS_GCHAN:
+		cfg_info->data = nxge_rxdma_channel_cnt(nxgep);
+		break;
+	case NXGE_RX_CLASS_GRULE_CNT:
+		MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
+		cfg_info->rule_cnt = nxge_get_valid_tcam_cnt(nxgep);
+		MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
+		break;
+	case NXGE_RX_CLASS_GRULE:
+		nxge_get_tcam_entry(nxgep, &cfg_info->fs);
+		break;
+	case NXGE_RX_CLASS_GRULE_ALL:
+		nxge_get_tcam_entry_all(nxgep, cfg_info);
+		break;
+	case NXGE_RX_CLASS_RULE_DEL:
+		nxge_del_tcam_entry(nxgep, cfg_info->fs.location);
+		break;
+	case NXGE_RX_CLASS_RULE_INS:
+		(void) nxge_add_tcam_entry(nxgep, &cfg_info->fs);
+		break;
+	}
+	return (0);
+}
+/* ARGSUSED */
+int
+nxge_rxhash_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
+{
+	uint32_t cmd;
+	cfg_cmd_t	*cfg_info = (cfg_cmd_t *)mp->b_rptr;
+
+	if (nxgep == NULL) {
+		return (-1);
+	}
+	cmd = cfg_info->cmd;
+
+	switch (cmd) {
+	default:
+		return (-1);
+	case NXGE_IPTUN_CFG_ADD_CLS:
+		nxge_add_iptun_class(nxgep, &cfg_info->iptun_cfg,
+		    &cfg_info->class_id);
+		break;
+	case NXGE_IPTUN_CFG_SET_HASH:
+		nxge_cfg_iptun_hash(nxgep, &cfg_info->iptun_cfg,
+		    cfg_info->class_id);
+		break;
+	case NXGE_IPTUN_CFG_DEL_CLS:
+		nxge_del_iptun_class(nxgep, cfg_info->class_id);
+		break;
+	case NXGE_IPTUN_CFG_GET_CLS:
+		nxge_get_iptun_class(nxgep, &cfg_info->iptun_cfg,
+		    cfg_info->class_id);
+		break;
+	case NXGE_CLS_CFG_SET_SYM:
+		nxge_set_ip_cls_sym(nxgep, cfg_info->class_id, cfg_info->sym);
+		break;
+	case NXGE_CLS_CFG_GET_SYM:
+		nxge_get_ip_cls_sym(nxgep, cfg_info->class_id, &cfg_info->sym);
+		break;
+	}
+	return (0);
+}
+
+void
+nxge_get_tcam_entry_all(p_nxge_t nxgep, rx_class_cfg_t *cfgp)
+{
+	nxge_classify_t *clasp = &nxgep->classifier;
+	uint16_t	n_entries;
+	int		i, j, k;
+	tcam_flow_spec_t	*tcam_entryp;
+
+	cfgp->data = clasp->tcam_size;
+	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
+	n_entries = cfgp->rule_cnt;
+
+	for (i = 0, j = 0; j < cfgp->data; j++) {
+		k = nxge_tcam_get_index(nxgep, j);
+		tcam_entryp = &clasp->tcam_entries[k];
+		if (tcam_entryp->valid != 1)
+			continue;
+		cfgp->rule_locs[i] = j;
+		i++;
+	};
+	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
+
+	if (n_entries != i) {
+		/* print warning, this should not happen */
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry_all"
+		    "n_entries[%d] != i[%d]!!!", n_entries, i));
+	}
+}
+
+
+/* Entries for the ports are interleaved in the TCAM */
+static uint16_t
+nxge_tcam_get_index(p_nxge_t nxgep, uint16_t index)
+{
+	/* One entry reserved for IP fragment rule */
+	if (index >= (nxgep->classifier.tcam_size - 1))
+		index = 0;
+	if (nxgep->classifier.fragment_bug == 1)
+		index++;
+	return (nxgep->classifier.tcam_top + (index * nxgep->nports));
+}
+
+static uint32_t
+nxge_tcam_cls_to_flow(uint32_t class_code) {
+	switch (class_code) {
+	case TCAM_CLASS_TCP_IPV4:
+		return (FSPEC_TCPIP4);
+	case TCAM_CLASS_UDP_IPV4:
+		return (FSPEC_UDPIP4);
+	case TCAM_CLASS_AH_ESP_IPV4:
+		return (FSPEC_AHIP4);
+	case TCAM_CLASS_SCTP_IPV4:
+		return (FSPEC_SCTPIP4);
+	case  TCAM_CLASS_TCP_IPV6:
+		return (FSPEC_TCPIP6);
+	case TCAM_CLASS_UDP_IPV6:
+		return (FSPEC_UDPIP6);
+	case TCAM_CLASS_AH_ESP_IPV6:
+		return (FSPEC_AHIP6);
+	case TCAM_CLASS_SCTP_IPV6:
+		return (FSPEC_SCTPIP6);
+	case TCAM_CLASS_IP_USER_4:
+	case TCAM_CLASS_IP_USER_5:
+	case TCAM_CLASS_IP_USER_6:
+	case TCAM_CLASS_IP_USER_7:
+		return (FSPEC_IP_USR);
+	default:
+		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "nxge_tcam_cls_to_flow"
+		    ": Unknown class code [0x%x]", class_code));
+		break;
+	}
+	return (0);
+}
+
+void
+nxge_get_tcam_entry(p_nxge_t nxgep, flow_resource_t *fs)
+{
+	uint16_t 	index;
+	tcam_flow_spec_t *tcam_ep;
+	tcam_entry_t	*tp;
+	flow_spec_t	*fspec;
+	tcpip4_spec_t 	*fspec_key;
+	tcpip4_spec_t 	*fspec_mask;
+
+	index = nxge_tcam_get_index(nxgep, (uint16_t)fs->location);
+	tcam_ep = &nxgep->classifier.tcam_entries[index];
+	if (tcam_ep->valid != 1) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry: :"
+		    "Entry [%d] invalid for index [%d]", fs->location, index));
+		return;
+	}
+
+	/* Fill the flow spec entry */
+	tp = &tcam_ep->tce;
+	fspec = &fs->flow_spec;
+	fspec->flow_type = nxge_tcam_cls_to_flow(tp->ip4_class_key);
+
+	/* TODO - look at proto field to differentiate between AH and ESP */
+	if (fspec->flow_type == FSPEC_AHIP4) {
+		if (tp->ip4_proto_key == IPPROTO_ESP)
+			fspec->flow_type = FSPEC_ESPIP4;
+	}
+
+	switch (tp->ip4_class_key) {
+	case TCAM_CLASS_TCP_IPV4:
+	case TCAM_CLASS_UDP_IPV4:
+	case TCAM_CLASS_AH_ESP_IPV4:
+	case TCAM_CLASS_SCTP_IPV4:
+		fspec_key = (tcpip4_spec_t *)&fspec->uh.tcpip4spec;
+		fspec_mask = (tcpip4_spec_t *)&fspec->um.tcpip4spec;
+		FSPEC_IPV4_ADDR(fspec_key->ip4dst, tp->ip4_dest_key);
+		FSPEC_IPV4_ADDR(fspec_mask->ip4dst, tp->ip4_dest_mask);
+		FSPEC_IPV4_ADDR(fspec_key->ip4src, tp->ip4_src_key);
+		FSPEC_IPV4_ADDR(fspec_mask->ip4src, tp->ip4_src_mask);
+		fspec_key->tos = tp->ip4_tos_key;
+		fspec_mask->tos = tp->ip4_tos_mask;
+		break;
+	default:
+		break;
+	}
+
+	switch (tp->ip4_class_key) {
+	case TCAM_CLASS_TCP_IPV4:
+	case TCAM_CLASS_UDP_IPV4:
+	case TCAM_CLASS_SCTP_IPV4:
+		FSPEC_IP_PORTS(fspec_key->pdst, fspec_key->psrc,
+		    tp->ip4_port_key);
+		FSPEC_IP_PORTS(fspec_mask->pdst, fspec_mask->psrc,
+		    tp->ip4_port_mask);
+		break;
+	case TCAM_CLASS_AH_ESP_IPV4:
+		fspec->uh.ahip4spec.spi = tp->ip4_port_key;
+		fspec->um.ahip4spec.spi = tp->ip4_port_mask;
+		break;
+	case TCAM_CLASS_IP_USER_4:
+	case TCAM_CLASS_IP_USER_5:
+	case TCAM_CLASS_IP_USER_6:
+	case TCAM_CLASS_IP_USER_7:
+		fspec->uh.ip_usr_spec.l4_4_bytes = tp->ip4_port_key;
+		fspec->um.ip_usr_spec.l4_4_bytes = tp->ip4_port_mask;
+		fspec->uh.ip_usr_spec.ip_ver = FSPEC_IP4;
+		fspec->uh.ip_usr_spec.proto = tp->ip4_proto_key;
+		fspec->um.ip_usr_spec.proto = tp->ip4_proto_mask;
+		break;
+	default:
+		break;
+	}
+
+	if (tp->match_action.bits.ldw.disc == 1) {
+		fs->channel_cookie = NXGE_PKT_DISCARD;
+	} else {
+		fs->channel_cookie = tp->match_action.bits.ldw.offset;
+	}
+}
+
+void
+nxge_del_tcam_entry(p_nxge_t nxgep, uint32_t location)
+{
+	npi_status_t rs = NPI_SUCCESS;
+	uint16_t 	index;
+	tcam_flow_spec_t *tcam_ep;
+	tcam_entry_t	*tp;
+	tcam_class_t	class;
+
+	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
+	index = nxge_tcam_get_index(nxgep, (uint16_t)location);
+	tcam_ep = &nxgep->classifier.tcam_entries[index];
+	if (tcam_ep->valid != 1) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_tcam_entry: :"
+		    "Entry [%d] invalid for index [%d]", location, index));
+		goto fail;
+	}
+
+	/* Fill the flow spec entry */
+	tp = &tcam_ep->tce;
+	class = tp->ip4_class_key;
+	if (class >= TCAM_CLASS_IP_USER_4 && class <= TCAM_CLASS_IP_USER_7) {
+		int i;
+		nxge_usr_l3_cls_t *l3_ucls_p;
+		p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
+
+		for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
+			l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
+			if (l3_ucls_p->valid) {
+				if (l3_ucls_p->cls == class &&
+				    l3_ucls_p->tcam_ref_cnt) {
+					l3_ucls_p->tcam_ref_cnt--;
+					if (l3_ucls_p->tcam_ref_cnt > 0)
+						continue;
+					/* disable class */
+					rs = npi_fflp_cfg_ip_usr_cls_disable(
+					    nxgep->npi_reg_handle,
+					    (tcam_class_t)class);
+					if (rs != NPI_SUCCESS)
+						goto fail;
+					l3_ucls_p->cls = 0;
+					l3_ucls_p->pid = 0;
+					l3_ucls_p->valid = 0;
+					break;
+				}
+			}
+		}
+		if (i == NXGE_L3_PROG_CLS) {
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "nxge_del_tcam_entry: Usr class "
+			    "0x%llx not found", (unsigned long long) class));
+			goto fail;
+		}
+	}
+
+	rs = npi_fflp_tcam_entry_invalidate(nxgep->npi_reg_handle, index);
+	if (rs != NPI_SUCCESS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_del_tcam_entry: TCAM invalidate failed "
+		    "at loc %d ", location));
+		goto fail;
+	}
+
+	nxgep->classifier.tcam_entries[index].valid = 0;
+	nxgep->classifier.tcam_entry_cnt--;
+
+	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
+	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_del_tcam_entry"));
+	return;
+fail:
+	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+	    "<== nxge_del_tcam_entry FAILED"));
+}
+
+static uint8_t
+nxge_iptun_pkt_type_to_pid(uint8_t pkt_type)
+{
+	uint8_t pid = 0;
+
+	switch (pkt_type) {
+	case IPTUN_PKT_IPV4:
+		pid = 4;
+		break;
+	case IPTUN_PKT_IPV6:
+		pid = 41;
+		break;
+	case IPTUN_PKT_GRE:
+		pid = 47;
+		break;
+	case IPTUN_PKT_GTP:
+		pid = 17;
+		break;
+	default:
+		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
+		    "nxge_iptun_pkt_type_to_pid: Unknown pkt type 0x%x",
+		    pkt_type));
+		break;
+	}
+
+	return (pid);
+}
+
+static npi_status_t
+nxge_set_iptun_usr_cls_reg(p_nxge_t nxgep, uint64_t class,
+		iptun_cfg_t *iptunp)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+	switch (iptunp->in_pkt_type) {
+	case IPTUN_PKT_IPV4:
+	case IPTUN_PKT_IPV6:
+		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
+		    (tcam_class_t)class, 0, 0, 0, 0);
+		break;
+	case IPTUN_PKT_GRE:
+		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
+		    (tcam_class_t)class, iptunp->l4b0_val,
+		    iptunp->l4b0_mask, 0, 0);
+		break;
+	case IPTUN_PKT_GTP:
+		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
+		    (tcam_class_t)class, 0, 0, iptunp->l4b23_val,
+		    (iptunp->l4b23_sel & 0x01));
+		break;
+	default:
+		rs = NPI_FFLP_TCAM_CLASS_INVALID;
+		break;
+	}
+	return (rs);
+}
+
+void
+nxge_add_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp,
+		uint8_t *cls_idp)
+{
+	int i, add_cls;
+	uint8_t pid;
+	uint64_t class;
+	p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+	pid = nxge_iptun_pkt_type_to_pid(iptunp->in_pkt_type);
+	if (pid == 0)
+		return;
+
+	add_cls = 0;
+	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
+
+	/* Get an user programmable class ID */
+	for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
+		if (hw_p->tcam_l3_prog_cls[i].valid == 0) {
+			/* todo add new usr class reg */
+			switch (i) {
+			case 0:
+				class = TCAM_CLASS_IP_USER_4;
+				break;
+			case 1:
+				class = TCAM_CLASS_IP_USER_5;
+				break;
+			case 2:
+				class = TCAM_CLASS_IP_USER_6;
+				break;
+			case 3:
+				class = TCAM_CLASS_IP_USER_7;
+				break;
+			default:
+				break;
+			}
+			rs = npi_fflp_cfg_ip_usr_cls_set(handle,
+			    (tcam_class_t)class, 0, 0, pid, 0);
+			if (rs != NPI_SUCCESS)
+				goto fail;
+
+			rs = nxge_set_iptun_usr_cls_reg(nxgep, class, iptunp);
+
+			if (rs != NPI_SUCCESS)
+				goto fail;
+
+			rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
+			    (tcam_class_t)class);
+			if (rs != NPI_SUCCESS)
+				goto fail;
+
+			hw_p->tcam_l3_prog_cls[i].cls = class;
+			hw_p->tcam_l3_prog_cls[i].pid = pid;
+			hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
+			    iptunp->in_pkt_type;
+			hw_p->tcam_l3_prog_cls[i].valid = 1;
+			*cls_idp = (uint8_t)class;
+			add_cls = 1;
+			break;
+		} else if (hw_p->tcam_l3_prog_cls[i].pid == pid) {
+			if (hw_p->tcam_l3_prog_cls[i].flow_pkt_type == 0) {
+				/* there is no flow key */
+				/* todo program the existing usr class reg */
+
+				rs = nxge_set_iptun_usr_cls_reg(nxgep, class,
+				    iptunp);
+				if (rs != NPI_SUCCESS)
+					goto fail;
+
+				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
+				    (tcam_class_t)class);
+				if (rs != NPI_SUCCESS)
+					goto fail;
+
+				hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
+				    iptunp->in_pkt_type;
+				*cls_idp = (uint8_t)class;
+				add_cls = 1;
+			} else {
+				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+				    "nxge_add_iptun_class: L3 usr "
+				    "programmable class with pid %d "
+				    "already exists", pid));
+			}
+			break;
+		}
+	}
+	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+
+	if (add_cls != 1) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_add_iptun_class: Could not add IP tunneling class"));
+	}
+	return;
+fail:
+	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_iptun_class: FAILED"));
+}
+
+static boolean_t
+nxge_is_iptun_cls_present(p_nxge_t nxgep, uint8_t cls_id, int *idx)
+{
+	int i;
+	p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
+
+	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
+	for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
+		if (hw_p->tcam_l3_prog_cls[i].valid &&
+		    hw_p->tcam_l3_prog_cls[i].flow_pkt_type != 0) {
+			if (hw_p->tcam_l3_prog_cls[i].cls == cls_id)
+				break;
+		}
+	}
+	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
+
+	if (i == NXGE_L3_PROG_CLS) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_is_iptun_cls_present: Invalid class %d", cls_id));
+		return (B_FALSE);
+	} else {
+		*idx = i;
+		return (B_TRUE);
+	}
+}
+
+void
+nxge_cfg_iptun_hash(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
+{
+	int idx;
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	flow_key_cfg_t cfg;
+
+	/* check to see that this is a valid class ID */
+	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &idx)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_cfg_iptun_hash: nxge_is_iptun_cls_present "
+		    "failed for cls_id %d", cls_id));
+		return;
+	}
+
+	bzero((void *)&cfg, sizeof (flow_key_cfg_t));
+
+	/*
+	 * This ensures that all 4 bytes of the XOR value are loaded to the
+	 * hash key.
+	 */
+	cfg.use_dport = cfg.use_sport = cfg.ip_opts_exist = 1;
+
+	cfg.l4_xor_sel = (iptunp->l4xor_sel & FL_KEY_USR_L4XOR_MSK);
+	cfg.use_l4_md = 1;
+
+	if (iptunp->hash_flags & HASH_L3PROTO)
+		cfg.use_proto = 1;
+	else if (iptunp->hash_flags & HASH_IPDA)
+		cfg.use_daddr = 1;
+	else if (iptunp->hash_flags & HASH_IPSA)
+		cfg.use_saddr = 1;
+	else if (iptunp->hash_flags & HASH_VLAN)
+		cfg.use_vlan = 1;
+	else if (iptunp->hash_flags & HASH_L2DA)
+		cfg.use_l2da = 1;
+	else if (iptunp->hash_flags & HASH_IFPORT)
+		cfg.use_portnum = 1;
+
+	(void) npi_fflp_cfg_ip_cls_flow_key_rfnl(handle, (tcam_class_t)cls_id,
+	    &cfg);
+}
+
+void
+nxge_del_iptun_class(p_nxge_t nxgep, uint8_t cls_id)
+{
+	int i;
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+
+
+	/* check to see that this is a valid class ID */
+	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i)) {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_del_iptun_class: Invalid class ID 0x%x", cls_id));
+		return;
+	}
+
+	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
+	rs = npi_fflp_cfg_ip_usr_cls_disable(handle, (tcam_class_t)cls_id);
+	if (rs != NPI_SUCCESS)
+		goto fail;
+	nxgep->nxge_hw_p->tcam_l3_prog_cls[i].flow_pkt_type = 0;
+	if (nxgep->nxge_hw_p->tcam_l3_prog_cls[i].tcam_ref_cnt == 0)
+		nxgep->nxge_hw_p->tcam_l3_prog_cls[i].valid = 0;
+
+	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
+	return;
+fail:
+	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_iptun_class: FAILED"));
+}
+
+void
+nxge_get_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
+{
+	int i;
+	uint8_t pid;
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+	flow_key_cfg_t cfg;
+
+
+	/* check to see that this is a valid class ID */
+	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i))
+		return;
+
+	bzero((void *)iptunp, sizeof (iptun_cfg_t));
+
+	pid = nxgep->nxge_hw_p->tcam_l3_prog_cls[i].pid;
+
+	rs = npi_fflp_cfg_ip_usr_cls_get_iptun(handle, (tcam_class_t)cls_id,
+	    &iptunp->l4b0_val, &iptunp->l4b0_mask, &iptunp->l4b23_val,
+	    &iptunp->l4b23_sel);
+	if (rs != NPI_SUCCESS)
+		goto fail;
+
+	rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
+	    (tcam_class_t)cls_id, &cfg);
+	if (rs != NPI_SUCCESS)
+		goto fail;
+
+	iptunp->l4xor_sel = cfg.l4_xor_sel;
+	if (cfg.use_proto)
+		iptunp->hash_flags |= HASH_L3PROTO;
+	else if (cfg.use_daddr)
+		iptunp->hash_flags |= HASH_IPDA;
+	else if (cfg.use_saddr)
+		iptunp->hash_flags |= HASH_IPSA;
+	else if (cfg.use_vlan)
+		iptunp->hash_flags |= HASH_VLAN;
+	else if (cfg.use_l2da)
+		iptunp->hash_flags |= HASH_L2DA;
+	else if (cfg.use_portnum)
+		iptunp->hash_flags |= HASH_IFPORT;
+
+	switch (pid) {
+	case 4:
+		iptunp->in_pkt_type = IPTUN_PKT_IPV4;
+		break;
+	case 41:
+		iptunp->in_pkt_type = IPTUN_PKT_IPV6;
+		break;
+	case 47:
+		iptunp->in_pkt_type = IPTUN_PKT_GRE;
+		break;
+	case 17:
+		iptunp->in_pkt_type = IPTUN_PKT_GTP;
+		break;
+	default:
+		iptunp->in_pkt_type = 0;
+		break;
+	}
+
+	return;
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_iptun_class: FAILED"));
+}
+
+void
+nxge_set_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t sym)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+	boolean_t sym_en = (sym == 1) ? B_TRUE : B_FALSE;
+
+	rs = npi_fflp_cfg_sym_ip_cls_flow_key(handle, (tcam_class_t)cls_id,
+	    sym_en);
+	if (rs != NPI_SUCCESS)
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_set_ip_cls_sym: FAILED"));
+}
+
+void
+nxge_get_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t *sym)
+{
+	npi_handle_t handle = nxgep->npi_reg_handle;
+	npi_status_t rs = NPI_SUCCESS;
+	flow_key_cfg_t cfg;
+
+	rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
+	    (tcam_class_t)cls_id, &cfg);
+	if (rs != NPI_SUCCESS)
+		goto fail;
+
+	if (cfg.use_sym)
+		*sym = 1;
+	else
+		*sym = 0;
+	return;
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_ip_cls_sym: FAILED"));
+}
--- a/usr/src/uts/common/io/nxge/nxge_fzc.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_fzc.c	Fri Dec 11 10:41:17 2009 -0800
@@ -31,6 +31,8 @@
 
 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
 static int	nxge_herr2kerr(uint64_t);
+static uint64_t nxge_init_hv_fzc_lp_op(p_nxge_t, uint64_t,
+    uint64_t, uint64_t, uint64_t, uint64_t);
 #endif
 
 static nxge_status_t nxge_init_fzc_rdc_pages(p_nxge_t,
@@ -1265,8 +1267,8 @@
 	/*
 	 * Initialize logical page 1 for data buffers.
 	 */
-	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
-	    (uint64_t)0,
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)0, N2NIU_TX_LP_CONF,
 	    tx_ring_p->hv_tx_buf_base_ioaddr_pp,
 	    tx_ring_p->hv_tx_buf_ioaddr_size);
 
@@ -1288,10 +1290,9 @@
 
 #ifdef	DEBUG
 	ra = size = 0;
-	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
-	    (uint64_t)0,
-	    &ra,
-	    &size);
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)0, N2NIU_TX_LP_INFO,
+	    (uint64_t)&ra, (uint64_t)&size);
 
 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
 	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
@@ -1323,8 +1324,8 @@
 	/*
 	 * Initialize logical page 2 for control buffers.
 	 */
-	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
-	    (uint64_t)1,
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)1, N2NIU_TX_LP_CONF,
 	    tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
 	    tx_ring_p->hv_tx_cntl_ioaddr_size);
 
@@ -1359,10 +1360,9 @@
 
 #ifdef	DEBUG
 	ra = size = 0;
-	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
-	    (uint64_t)1,
-	    &ra,
-	    &size);
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)1, N2NIU_TX_LP_INFO,
+	    (uint64_t)&ra, (uint64_t)&size);
 
 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
 	    "==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
@@ -1406,10 +1406,11 @@
 	}
 
 	/* Initialize data buffers for page 0 */
-	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
-	    (uint64_t)0,
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)0, N2NIU_RX_LP_CONF,
 	    rbrp->hv_rx_buf_base_ioaddr_pp,
 	    rbrp->hv_rx_buf_ioaddr_size);
+
 	err = (nxge_status_t)nxge_herr2kerr(hverr);
 	if (err != 0) {
 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
@@ -1429,10 +1430,9 @@
 
 #ifdef	DEBUG
 	ra = size = 0;
-	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
-	    (uint64_t)0,
-	    &ra,
-	    &size);
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)0, N2NIU_RX_LP_INFO,
+	    (uint64_t)&ra, (uint64_t)&size);
 
 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
 	    "==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
@@ -1452,8 +1452,8 @@
 #endif
 
 	/* Initialize control buffers for logical page 1.  */
-	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
-	    (uint64_t)1,
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)1, N2NIU_RX_LP_CONF,
 	    rbrp->hv_rx_cntl_base_ioaddr_pp,
 	    rbrp->hv_rx_cntl_ioaddr_size);
 
@@ -1476,11 +1476,9 @@
 
 #ifdef	DEBUG
 	ra = size = 0;
-	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
-	    (uint64_t)1,
-	    &ra,
-	    &size);
-
+	hverr = nxge_init_hv_fzc_lp_op(nxgep, (uint64_t)channel,
+	    (uint64_t)1, N2NIU_RX_LP_INFO,
+	    (uint64_t)&ra, (uint64_t)&size);
 
 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
 	    "==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
@@ -1532,4 +1530,144 @@
 	return (s_errcode);
 }
 
+uint64_t
+nxge_init_hv_fzc_lp_op(p_nxge_t nxgep, uint64_t channel,
+    uint64_t page_no, uint64_t op_type,
+    uint64_t ioaddr_pp, uint64_t ioaddr_size)
+{
+	uint64_t		hverr;
+	uint64_t		major;
+	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
+	nxhv_dc_fp_t		*io_fp;
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+	    "==> nxge_init_hv_fzc_lp_op"));
+
+	major = nxgep->niu_hsvc.hsvc_major;
+	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
+	    "==> nxge_init_hv_fzc_lp_op (major %d): channel %d op_type 0x%x "
+	    "page_no %d ioaddr_pp $%p ioaddr_size 0x%llx",
+	    major, channel, op_type, page_no, ioaddr_pp, ioaddr_size));
+
+	/* Call the transmit conf function. */
+	switch (major) {
+	case NIU_MAJOR_VER: /* 1 */
+		switch (op_type) {
+		case N2NIU_TX_LP_CONF:
+			io_fp = &nhd->hio.tx;
+			hverr = (*io_fp->lp_conf)((uint64_t)channel,
+			    (uint64_t)page_no,
+			    (uint64_t)ioaddr_pp,
+			    (uint64_t)ioaddr_size);
+			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			    "==> nxge_init_hv_fzc_lp_op(tx_conf): major %d "
+			    "op 0x%x hverr 0x%x", major, op_type, hverr));
+			break;
+
+		case N2NIU_TX_LP_INFO:
+			io_fp = &nhd->hio.tx;
+			hverr = (*io_fp->lp_info)((uint64_t)channel,
+			    (uint64_t)page_no,
+			    (uint64_t *)ioaddr_pp,
+			    (uint64_t *)ioaddr_size);
+			break;
+
+		case N2NIU_RX_LP_CONF:
+			io_fp = &nhd->hio.rx;
+			hverr = (*io_fp->lp_conf)((uint64_t)channel,
+			    (uint64_t)page_no,
+			    (uint64_t)ioaddr_pp,
+			    (uint64_t)ioaddr_size);
+			break;
+
+		case N2NIU_RX_LP_INFO:
+			io_fp = &nhd->hio.rx;
+			hverr = (*io_fp->lp_info)((uint64_t)channel,
+			    (uint64_t)page_no,
+			    (uint64_t *)ioaddr_pp,
+			    (uint64_t *)ioaddr_size);
+			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
+			    "op 0x%x hverr 0x%x", major, op_type, hverr));
+			break;
+
+		default:
+			hverr = EINVAL;
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
+			    "invalid op 0x%x hverr 0x%x", major,
+			    op_type, hverr));
+			break;
+		}
+
+		break;
+
+	case NIU_MAJOR_VER_2: /* 2 */
+		switch (op_type) {
+		case N2NIU_TX_LP_CONF:
+			io_fp = &nhd->hio.tx;
+			hverr = (*io_fp->lp_cfgh_conf)(nxgep->niu_cfg_hdl,
+			    (uint64_t)channel,
+			    (uint64_t)page_no, ioaddr_pp, ioaddr_size);
+
+			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			    "==> nxge_init_hv_fzc_lp_op(tx_conf): major %d "
+			    "op 0x%x hverr 0x%x", major, op_type, hverr));
+			break;
+
+		case N2NIU_TX_LP_INFO:
+			io_fp = &nhd->hio.tx;
+			hverr = (*io_fp->lp_cfgh_info)(nxgep->niu_cfg_hdl,
+			    (uint64_t)channel,
+			    (uint64_t)page_no,
+			    (uint64_t *)ioaddr_pp,
+			    (uint64_t *)ioaddr_size);
+			break;
+
+		case N2NIU_RX_LP_CONF:
+			io_fp = &nhd->hio.rx;
+			hverr = (*io_fp->lp_cfgh_conf)(nxgep->niu_cfg_hdl,
+			    (uint64_t)channel,
+			    (uint64_t)page_no,
+			    (uint64_t)ioaddr_pp,
+			    (uint64_t)ioaddr_size);
+			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
+			    "hverr 0x%x", major, hverr));
+			break;
+
+		case N2NIU_RX_LP_INFO:
+			io_fp = &nhd->hio.rx;
+			hverr = (*io_fp->lp_cfgh_info)(nxgep->niu_cfg_hdl,
+			    (uint64_t)channel,
+			    (uint64_t)page_no,
+			    (uint64_t *)ioaddr_pp,
+			    (uint64_t *)ioaddr_size);
+			break;
+
+		default:
+			hverr = EINVAL;
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "==> nxge_init_hv_fzc_lp_op(rx_conf): major %d "
+			    "invalid op 0x%x hverr 0x%x", major,
+			    op_type, hverr));
+			break;
+		}
+
+		break;
+
+	default:
+		hverr = EINVAL;
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "==> nxge_init_hv_fzc_lp_op(rx_conf): invalid major %d "
+		    "op 0x%x hverr 0x%x", major, op_type, hverr));
+		break;
+	}
+
+	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
+	    "<== nxge_init_hv_fzc_lp_op: 0x%x", hverr));
+
+	return (hverr);
+}
+
 #endif	/* sun4v and NIU_LP_WORKAROUND */
--- a/usr/src/uts/common/io/nxge/nxge_hcall.s	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_hcall.s	Fri Dec 11 10:41:17 2009 -0800
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -74,6 +74,19 @@
 #define	N2NIU_VRTX_PARAM_GET	0x15a
 #define	N2NIU_VRTX_PARAM_SET	0x15b
 
+/*
+ * The new set of HV APIs to provide the ability
+ * of a domain to manage multiple NIU resources at once to
+ * support the KT familty chip having up to 4 NIUs
+ * per system. The trap # will be the same as those defined
+ * before 2.0
+ */
+#define	N2NIU_CFGH_RX_LP_SET	0x142
+#define	N2NIU_CFGH_TX_LP_SET	0x143
+#define	N2NIU_CFGH_RX_LP_GET	0x144
+#define	N2NIU_CFGH_TX_LP_GET	0x145
+#define	N2NIU_CFGH_VR_ASSIGN	0x146
+
 #if defined(lint) || defined(__lint)
 
 /*ARGSUSED*/
@@ -105,6 +118,38 @@
 hv_niu_vr_assign(uint64_t vridx, uint64_t ldc_id, uint32_t *cookie)
 { return (0); }
 
+/*
+ * KT: Interface functions which require the configuration handle
+ */
+/*ARGSUSED*/
+uint64_t
+hv_niu_cfgh_rx_logical_page_conf(uint64_t cfgh, uint64_t chidx, uint64_t pgidx,
+	uint64_t raddr, uint64_t size)
+{ return (0); }
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_cfgh_rx_logical_page_info(uint64_t cfgh, uint64_t chidx, uint64_t pgidx,
+	uint64_t *raddr, uint64_t *size)
+{ return (0); }
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_cfgh_tx_logical_page_conf(uint64_t cfgh, uint64_t chidx, uint64_t pgidx,
+	uint64_t raddr, uint64_t size)
+{ return (0); }
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_cfgh_tx_logical_page_info(uint64_t cfgh, uint64_t chidx, uint64_t pgidx,
+	uint64_t *raddr, uint64_t *size)
+{ return (0); }
+
+/*ARGSUSED*/
+uint64_t
+hv_niu_cfgh_vr_assign(uint64_t cfgh, uint64_t vridx, uint64_t ldc_id, uint32_t *cookie)
+{ return (0); }
+
 /*ARGSUSED*/
 uint64_t
 hv_niu_vr_unassign(uint32_t cookie)
@@ -517,6 +562,71 @@
 	nop
 	SET_SIZE(hv_niu_vrtx_param_set)
 
+	/*
+	 * Interface functions which require the configuration handle.
+	 */
+	/*
+	 * hv_niu_cfgh_rx_logical_page_conf(uint64_t cfgh, uint64_t chidx,
+	 *    uint64_t pgidx, uint64_t raddr, uint64_t size)
+	 */
+	ENTRY(hv_niu_cfgh_rx_logical_page_conf)
+	mov	N2NIU_RX_LP_CONF, %o5
+	ta	FAST_TRAP
+	retl
+	nop
+	SET_SIZE(hv_niu_cfgh_rx_logical_page_conf)
+
+	/*
+	 * hv_niu_cfgh_rx_logical_page_info(uint64_t cfgh, uint64_t chidx,
+	 *    uint64_t pgidx, uint64_t *raddr, uint64_t *size)
+	 */
+	ENTRY(hv_niu_cfgh_rx_logical_page_info)
+	mov	%o3, %g1
+	mov	%o4, %g2
+	mov	N2NIU_RX_LP_INFO, %o5
+	ta	FAST_TRAP
+	stx	%o1, [%g1]
+	retl
+	stx	%o2, [%g2]
+	SET_SIZE(hv_niu_cfgh_rx_logical_page_info)
+
+	/*
+	 * hv_niu_cfgh_tx_logical_page_conf(uint64_t cfgh, uint64_t chidx,
+	 *    uint64_t pgidx, uint64_t raddr, uint64_t size)
+	 */
+	ENTRY(hv_niu_cfgh_tx_logical_page_conf)
+	mov	N2NIU_TX_LP_CONF, %o5
+	ta	FAST_TRAP
+	retl
+	nop
+	SET_SIZE(hv_niu_cfgh_tx_logical_page_conf)
+
+	/*
+	 * hv_niu_cfgh_tx_logical_page_info(uint64_t cfgh, uint64_t chidx,
+	 *    uint64_t pgidx, uint64_t *raddr, uint64_t *size)
+	 */
+	ENTRY(hv_niu_cfgh_tx_logical_page_info)
+	mov	%o3, %g1
+	mov	%o4, %g2
+	mov	N2NIU_TX_LP_INFO, %o5
+	ta	FAST_TRAP
+	stx	%o1, [%g1]
+	retl
+	stx	%o2, [%g2]
+	SET_SIZE(hv_niu_cfgh_tx_logical_page_info)
+
+	/*
+	 * hv_niu_cfgh_vr_assign(uint64_t cfgh, uint64_t vridx, uint64_t ldc_id,
+	 *     uint32_t *cookie)
+	 */
+	ENTRY(hv_niu_cfgh_vr_assign)
+	mov	%o3, %g1
+	mov	N2NIU_VR_ASSIGN, %o5
+	ta	FAST_TRAP
+	retl
+	stw	%o1, [%g1]
+	SET_SIZE(hv_niu_cfgh_vr_assign)
+
 #endif	/* lint || __lint */
 
 #endif /*defined(sun4v)*/
--- a/usr/src/uts/common/io/nxge/nxge_hio.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_hio.c	Fri Dec 11 10:41:17 2009 -0800
@@ -353,6 +353,9 @@
 	nxge_grp_set_t *set;
 	vpc_type_t type;
 
+	if (group == NULL)
+		return;
+
 	MUTEX_ENTER(&nxge->group_lock);
 	switch (group->type) {
 	case NXGE_TRANSMIT_GROUP:
@@ -987,13 +990,20 @@
 	    (nxge->niu_type == N2_NIU)) {
 		if (nxge->niu_hsvc_available == B_TRUE) {
 			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
-			if (niu_hsvc->hsvc_major == 1 &&
-			    niu_hsvc->hsvc_minor == 1)
+			/*
+			 * Versions supported now are:
+			 *  - major number >= 1 (NIU_MAJOR_VER).
+			 */
+			if ((niu_hsvc->hsvc_major >= NIU_MAJOR_VER) ||
+			    (niu_hsvc->hsvc_major == 1 &&
+			    niu_hsvc->hsvc_minor == 1)) {
 				nxge->environs = SOLARIS_SERVICE_DOMAIN;
-			NXGE_DEBUG_MSG((nxge, HIO_CTL,
-			    "nxge_hio_init: hypervisor services "
-			    "version %d.%d",
-			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
+				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
+				    "nxge_hio_init: hypervisor services "
+				    "version %d.%d",
+				    niu_hsvc->hsvc_major,
+				    niu_hsvc->hsvc_minor));
+			}
 		}
 	}
 
@@ -1390,21 +1400,45 @@
 	nxge_hio_dc_t *dc;
 	nxhv_vr_fp_t *fp;
 	int i;
+	uint64_t major;
 
 	/*
 	 * Ask the Hypervisor to set up the VR for us
 	 */
 	fp = &nhd->hio.vr;
-	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
-		NXGE_ERROR_MSG((nxge, HIO_CTL,
-		    "nxge_hio_share_assign: "
-		    "vr->assign() returned %d", hv_rv));
-		return (-EIO);
+	major = nxge->niu_hsvc.hsvc_major;
+	switch (major) {
+	case NIU_MAJOR_VER: /* 1 */
+		if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
+			NXGE_ERROR_MSG((nxge, HIO_CTL,
+			    "nxge_hio_share_assign: major %d "
+			    "vr->assign() returned %d", major, hv_rv));
+			nxge_hio_unshare(vr);
+			return (-EIO);
+		}
+
+		break;
+
+	case NIU_MAJOR_VER_2: /* 2 */
+	default:
+		if ((hv_rv = (*fp->cfgh_assign)
+		    (nxge->niu_cfg_hdl, vr->region, cookie, &vr->cookie))) {
+			NXGE_ERROR_MSG((nxge, HIO_CTL,
+			    "nxge_hio_share_assign: major %d "
+			    "vr->assign() returned %d", major, hv_rv));
+			nxge_hio_unshare(vr);
+			return (-EIO);
+		}
+
+		break;
 	}
 
+	NXGE_DEBUG_MSG((nxge, HIO_CTL,
+	    "nxge_hio_share_assign: major %d "
+	    "vr->assign() success", major));
+
 	/*
 	 * For each shared TDC, ask the HV to find us an empty slot.
-	 * -----------------------------------------------------
 	 */
 	dc = vr->tx_group.dc;
 	for (i = 0; i < NXGE_MAX_TDCS; i++) {
@@ -1432,7 +1466,6 @@
 
 	/*
 	 * For each shared RDC, ask the HV to find us an empty slot.
-	 * -----------------------------------------------------
 	 */
 	dc = vr->rx_group.dc;
 	for (i = 0; i < NXGE_MAX_RDCS; i++) {
--- a/usr/src/uts/common/io/nxge/nxge_hv.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_hv.c	Fri Dec 11 10:41:17 2009 -0800
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -35,6 +35,13 @@
 #include <sys/nxge/nxge_impl.h>
 #include <sys/nxge/nxge_hio.h>
 
+/*
+ * The HV VR functions are set up based on
+ * the version number of the NIU API group.
+ * For version 2.0 and above, the NIU will
+ * be referenced from the cfg-handle.
+ */
+
 #if defined(sun4v)
 
 void
@@ -49,7 +56,11 @@
 	/* First, the HV VR functions. */
 	vr = &nhd->hio.vr;
 
+	/* HV Major 1 interfaces */
 	vr->assign = &hv_niu_vr_assign;
+	/* HV Major 2 interfaces */
+	vr->cfgh_assign = &hv_niu_cfgh_vr_assign;
+
 	vr->unassign = &hv_niu_vr_unassign;
 	vr->getinfo = &hv_niu_vr_getinfo;
 
@@ -61,12 +72,15 @@
 	tx->unassign = &hv_niu_tx_dma_unassign;
 	tx->get_map = &hv_niu_vr_get_txmap;
 
+	/* HV Major 1 interfaces */
 	tx->lp_conf = &hv_niu_tx_logical_page_conf;
 	tx->lp_info = &hv_niu_tx_logical_page_info;
+	/* HV Major 2 interfaces */
+	tx->lp_cfgh_conf = &hv_niu_cfgh_tx_logical_page_conf;
+	tx->lp_cfgh_info = &hv_niu_cfgh_tx_logical_page_info;
 
 	tx->getinfo = &hv_niu_vrtx_getinfo;
 
-	// -------------------------------------------------------------
 	/* Now find the Receive functions. */
 	rx = &nhd->hio.rx;
 
@@ -74,8 +88,12 @@
 	rx->unassign = &hv_niu_rx_dma_unassign;
 	rx->get_map = &hv_niu_vr_get_rxmap;
 
+	/* HV Major 1 interfaces */
 	rx->lp_conf = &hv_niu_rx_logical_page_conf;
 	rx->lp_info = &hv_niu_rx_logical_page_info;
+	/* HV Major 2 interfaces */
+	rx->lp_cfgh_conf = &hv_niu_cfgh_rx_logical_page_conf;
+	rx->lp_cfgh_info = &hv_niu_cfgh_rx_logical_page_info;
 
 	rx->getinfo = &hv_niu_vrrx_getinfo;
 }
--- a/usr/src/uts/common/io/nxge/nxge_hw.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_hw.c	Fri Dec 11 10:41:17 2009 -0800
@@ -53,6 +53,7 @@
 
 extern uint32_t nxge_rx_mode;
 extern uint32_t nxge_jumbo_mtu;
+extern uint16_t	nxge_rdc_buf_offset;
 
 static void
 nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
@@ -144,6 +145,41 @@
 	/* per neptune common block init */
 	(void) nxge_fflp_hw_reset(nxgep);
 
+	if (nxgep->niu_hw_type != NIU_HW_TYPE_RF) {
+		switch (nxge_rdc_buf_offset) {
+		case SW_OFFSET_NO_OFFSET:
+		case SW_OFFSET_64:
+		case SW_OFFSET_128:
+			break;
+		default:
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "nxge_hw_init_niu_common: Unsupported RDC buffer"
+			    " offset code %d, setting to %d",
+			    nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
+			nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
+			break;
+		}
+	} else {
+		switch (nxge_rdc_buf_offset) {
+		case SW_OFFSET_NO_OFFSET:
+		case SW_OFFSET_64:
+		case SW_OFFSET_128:
+		case SW_OFFSET_192:
+		case SW_OFFSET_256:
+		case SW_OFFSET_320:
+		case SW_OFFSET_384:
+		case SW_OFFSET_448:
+			break;
+		default:
+			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+			    "nxge_hw_init_niu_common: Unsupported RDC buffer"
+			    " offset code %d, setting to %d",
+			    nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
+			nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
+			break;
+		}
+	}
+
 	hw_p->flags = COMMON_INIT_DONE;
 	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
 
--- a/usr/src/uts/common/io/nxge/nxge_mac.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_mac.c	Fri Dec 11 10:41:17 2009 -0800
@@ -90,6 +90,7 @@
 static boolean_t nxge_is_supported_phy(uint32_t, uint8_t);
 static boolean_t nxge_is_phy_present(p_nxge_t, int, uint32_t, uint32_t);
 static nxge_status_t nxge_n2_serdes_init(p_nxge_t);
+static nxge_status_t nxge_n2_kt_serdes_init(p_nxge_t);
 static nxge_status_t nxge_neptune_10G_serdes_init(p_nxge_t);
 static nxge_status_t nxge_1G_serdes_init(p_nxge_t);
 static nxge_status_t nxge_10G_link_intr_stop(p_nxge_t);
@@ -1476,6 +1477,11 @@
 
 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_n2_serdes_init port<%d>",
 	    portn));
+	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_serdes_init port<%d>: KT-NIU", portn));
+		return (nxge_n2_kt_serdes_init(nxgep));
+	}
 
 	tx_cfg_l.value = 0;
 	tx_cfg_h.value = 0;
@@ -1670,6 +1676,280 @@
 	    portn));
 
 	return (status);
+
+}
+
+/* Initialize the TI Hedwig Internal Serdes (N2-KT-NIU only) */
+
+static nxge_status_t
+nxge_n2_kt_serdes_init(p_nxge_t nxgep)
+{
+	uint8_t portn;
+	int chan;
+	k_esr_ti_cfgpll_l_t pll_cfg_l;
+	k_esr_ti_cfgrx_l_t rx_cfg_l;
+	k_esr_ti_cfgrx_h_t rx_cfg_h;
+	k_esr_ti_cfgtx_l_t tx_cfg_l;
+	k_esr_ti_cfgtx_h_t tx_cfg_h;
+#ifdef NXGE_DEBUG
+	k_esr_ti_testcfg_t cfg;
+#endif
+	k_esr_ti_testcfg_t test_cfg;
+	nxge_status_t status = NXGE_OK;
+	boolean_t mode_1g = B_FALSE;
+
+	portn = nxgep->mac.portnum;
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+	    "==> nxge_n2_kt_serdes_init port<%d>", portn));
+
+	tx_cfg_l.value = 0;
+	tx_cfg_h.value = 0;
+	rx_cfg_l.value = 0;
+	rx_cfg_h.value = 0;
+	pll_cfg_l.value = 0;
+	test_cfg.value = 0;
+
+	/*
+	 * The following setting assumes the reference clock frequency
+	 * is 156.25 MHz.
+	 */
+	/*
+	 * If the nxge driver has been plumbed without a link, then it will
+	 * detect a link up when a cable connecting to an auto-negotiation
+	 * partner is plugged into the port. Because the TN1010 PHY supports
+	 * both 1G and 10G speeds, the driver must re-configure the
+	 * Neptune/NIU according to the negotiated speed.  nxge_n2_serdes_init
+	 * is called at the post-link-up reconfiguration time. Here it calls
+	 * nxge_set_tn1010_param to set portmode before re-initializing
+	 * the serdes.
+	 */
+	if (nxgep->mac.portmode == PORT_1G_TN1010 ||
+	    nxgep->mac.portmode == PORT_10G_TN1010) {
+		if (nxge_set_tn1010_param(nxgep) != NXGE_OK) {
+			goto fail;
+		}
+	}
+	if (nxgep->mac.portmode == PORT_10G_FIBER ||
+	    nxgep->mac.portmode == PORT_10G_TN1010 ||
+	    nxgep->mac.portmode == PORT_10G_SERDES) {
+		tx_cfg_l.bits.entx = K_CFGTX_ENABLE_TX;
+		/* 0x1e21 */
+		tx_cfg_l.bits.swing = K_CFGTX_SWING_2000MV;
+		tx_cfg_l.bits.rate = K_CFGTX_RATE_HALF;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> tx_cfg_l 0x%x",
+		    portn, tx_cfg_l.value));
+
+		/* channel 0: enable syn. master */
+		/* 0x40 */
+		tx_cfg_h.bits.msync = K_CFGTX_ENABLE_MSYNC;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> tx_cfg_h 0x%x",
+		    portn, tx_cfg_h.value));
+		/* 0x4821 */
+		rx_cfg_l.bits.enrx = K_CFGRX_ENABLE_RX;
+		rx_cfg_l.bits.rate = K_CFGRX_RATE_HALF;
+		rx_cfg_l.bits.align = K_CFGRX_ALIGN_EN;
+		rx_cfg_l.bits.los = K_CFGRX_LOS_ENABLE;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> rx_cfg_l 0x%x",
+		    portn, rx_cfg_l.value));
+
+		/* 0x0008 */
+		rx_cfg_h.bits.eq = K_CFGRX_EQ_ADAPTIVE;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> rx_cfg_h 0x%x",
+		    portn, rx_cfg_h.value));
+
+		/* Set loopback mode if necessary */
+		if (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g) {
+			tx_cfg_h.bits.loopback = K_CFGTX_INNER_CML_ENA_LOOPBACK;
+			rx_cfg_h.bits.loopback = K_CFGTX_INNER_CML_ENA_LOOPBACK;
+			rx_cfg_l.bits.los = 0;
+
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			    "==> nxge_n2_kt_serdes_init port<%d>: "
+			    "loopback 0x%x", portn, tx_cfg_h.value));
+		}
+		/* 0xa1: Initialize PLL for 10G */
+		pll_cfg_l.bits.mpy = K_CFGPLL_MPY_20X;
+		pll_cfg_l.bits.enpll = K_CFGPLL_ENABLE_PLL;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> pll_cfg_l 0x%x",
+		    portn, pll_cfg_l.value));
+
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_PLL_CFG_L_REG, pll_cfg_l.value)) != NXGE_OK)
+			goto fail;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> pll_cfg_l 0x%x",
+		    portn, pll_cfg_l.value));
+#ifdef  NXGE_DEBUG
+		nxge_mdio_read(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_PLL_CFG_L_REG, &cfg.value);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d>: "
+		    "PLL cfg.l 0x%x (0x%x)",
+		    portn, pll_cfg_l.value, cfg.value));
+
+		nxge_mdio_read(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_PLL_STS_L_REG, &cfg.value);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d>: (0x%x)",
+		    portn, cfg.value));
+#endif
+	} else if (nxgep->mac.portmode == PORT_1G_FIBER ||
+	    nxgep->mac.portmode == PORT_1G_TN1010 ||
+	    nxgep->mac.portmode == PORT_1G_SERDES) {
+		mode_1g = B_TRUE;
+		/* 0x1e41 */
+		tx_cfg_l.bits.entx = 1;
+		tx_cfg_l.bits.rate = K_CFGTX_RATE_HALF;
+		tx_cfg_l.bits.swing = K_CFGTX_SWING_2000MV;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> tx_cfg_l 0x%x",
+		    portn, tx_cfg_l.value));
+
+
+		/* channel 0: enable syn. master */
+		tx_cfg_h.bits.msync = K_CFGTX_ENABLE_MSYNC;
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> tx_cfg_h 0x%x",
+		    portn, tx_cfg_h.value));
+
+
+		/* 0x4841 */
+		rx_cfg_l.bits.enrx = 1;
+		rx_cfg_l.bits.rate = K_CFGRX_RATE_HALF;
+		rx_cfg_l.bits.align = K_CFGRX_ALIGN_EN;
+		rx_cfg_l.bits.los = K_CFGRX_LOS_ENABLE;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> rx_cfg_l 0x%x",
+		    portn, rx_cfg_l.value));
+
+		/* 0x0008 */
+		rx_cfg_h.bits.eq = K_CFGRX_EQ_ADAPTIVE_LF_365MHZ_ZF;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> tx_cfg_h 0x%x",
+		    portn, rx_cfg_h.value));
+
+		/* 0xa1: Initialize PLL for 1G */
+		pll_cfg_l.bits.mpy = K_CFGPLL_MPY_20X;
+		pll_cfg_l.bits.enpll = K_CFGPLL_ENABLE_PLL;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d> pll_cfg_l 0x%x",
+		    portn, pll_cfg_l.value));
+
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_PLL_CFG_L_REG, pll_cfg_l.value))
+		    != NXGE_OK)
+			goto fail;
+
+
+#ifdef  NXGE_DEBUG
+		nxge_mdio_read(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_PLL_CFG_L_REG, &cfg.value);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "==> nxge_n2_serdes_init port<%d>: PLL cfg.l 0x%x (0x%x)",
+		    portn, pll_cfg_l.value, cfg.value));
+
+		nxge_mdio_read(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_PLL_STS_L_REG, &cfg.value);
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d>: (0x%x)",
+		    portn, cfg.value));
+#endif
+
+		/* Set loopback mode if necessary */
+		if (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000) {
+			tx_cfg_h.bits.loopback = TESTCFG_INNER_CML_DIS_LOOPBACK;
+
+			NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+			    "==> nxge_n2_kt_serdes_init port<%d>: "
+			    "loopback 0x%x", portn, test_cfg.value));
+			if ((status = nxge_mdio_write(nxgep, portn,
+			    ESR_N2_DEV_ADDR,
+			    ESR_N2_TX_CFG_L_REG_ADDR(0),
+			    tx_cfg_h.value)) != NXGE_OK) {
+				goto fail;
+			}
+		}
+	} else {
+		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+		    "nxge_n2_kt_serdes_init:port<%d> - "
+		    "unsupported port mode %d",
+		    portn, nxgep->mac.portmode));
+		goto fail;
+	}
+
+	NXGE_DELAY(20);
+	/* Clear the test register (offset 0x8004) */
+	if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+	    ESR_N2_TEST_CFG_REG, test_cfg.value)) != NXGE_OK) {
+		goto fail;
+	}
+	NXGE_DELAY(20);
+
+	/* init TX channels */
+	for (chan = 0; chan < 4; chan++) {
+		if (mode_1g)
+			tx_cfg_l.value = 0;
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_TX_CFG_L_REG_ADDR(chan), tx_cfg_l.value)) != NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_TX_CFG_H_REG_ADDR(chan), tx_cfg_h.value)) != NXGE_OK)
+			goto fail;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d>: "
+		    "chan %d tx_cfg_l 0x%x", portn, chan, tx_cfg_l.value));
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d>: "
+		    "chan %d tx_cfg_h 0x%x", portn, chan, tx_cfg_h.value));
+	}
+
+	/* init RX channels */
+	/* 1G mode only write to the first channel */
+	for (chan = 0; chan < 4; chan++) {
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_RX_CFG_L_REG_ADDR(chan), rx_cfg_l.value))
+		    != NXGE_OK)
+			goto fail;
+
+		if ((status = nxge_mdio_write(nxgep, portn, ESR_N2_DEV_ADDR,
+		    ESR_N2_RX_CFG_H_REG_ADDR(chan), rx_cfg_h.value))
+		    != NXGE_OK)
+			goto fail;
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d>: "
+		    "chan %d rx_cfg_l 0x%x", portn, chan, rx_cfg_l.value));
+
+		NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+		    "==> nxge_n2_kt_serdes_init port<%d>: "
+		    "chan %d rx_cfg_h 0x%x", portn, chan, rx_cfg_h.value));
+	}
+
+	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
+	    "<== nxge_n2_kt_serdes_init port<%d>", portn));
+
+	return (NXGE_OK);
+fail:
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+	    "nxge_n2_serdes_init: Failed to initialize N2 serdes for port<%d>",
+	    portn));
+
+	return (status);
 }
 
 /* Initialize the Neptune Internal Serdes for 10G (Neptune only) */
@@ -5938,7 +6218,7 @@
 	uint16_t	val1, val2, val3;
 #ifdef	NXGE_DEBUG_SYMBOL_ERR
 	uint16_t	val_debug;
-	uint16_t	val;
+	uint32_t	val;
 #endif
 
 	phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
@@ -6010,7 +6290,6 @@
 		goto fail;
 	}
 
-
 #ifdef	NXGE_DEBUG_ALIGN_ERR
 	/* Temp workaround for link down issue */
 	if (pcs_blk_lock == B_FALSE) {
--- a/usr/src/uts/common/io/nxge/nxge_main.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_main.c	Fri Dec 11 10:41:17 2009 -0800
@@ -96,6 +96,7 @@
 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
 uint32_t 	nxge_rbr_spare_size = 0;
 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
+uint16_t	nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
 boolean_t 	nxge_no_msg = B_TRUE;		/* control message display */
 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
@@ -171,6 +172,16 @@
 /*
  * Hypervisor N2/NIU services information.
  */
+/*
+ * The following is the default API supported:
+ * major 1 and minor 1.
+ *
+ * Please update NIU_MAJOR_HI, NIU_MINOR_HI,
+ * and the minor number supported
+ * when newer Hypervisor API interfaces
+ * are added. Also, please update nxge_hsvc_register()
+ * if needed.
+ */
 static hsvc_info_t niu_hsvc = {
 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
 	NIU_MINOR_VER, "nxge"
@@ -1073,27 +1084,89 @@
 nxge_hsvc_register(nxge_t *nxgep)
 {
 	nxge_status_t status;
-
-	if (nxgep->niu_type == N2_NIU) {
-		nxgep->niu_hsvc_available = B_FALSE;
-		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
-		if ((status = hsvc_register(&nxgep->niu_hsvc,
-		    &nxgep->niu_min_ver)) != 0) {
-			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
-			    "nxge_attach: %s: cannot negotiate "
-			    "hypervisor services revision %d group: 0x%lx "
-			    "major: 0x%lx minor: 0x%lx errno: %d",
-			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
-			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
-			    niu_hsvc.hsvc_minor, status));
-			return (DDI_FAILURE);
+	int i, j;
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
+	if (nxgep->niu_type != N2_NIU) {
+		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
+		return (DDI_SUCCESS);
+	}
+
+	/*
+	 * Currently, the NIU Hypervisor API supports two major versions:
+	 * version 1 and 2.
+	 * If Hypervisor introduces a higher major or minor version,
+	 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
+	 */
+	nxgep->niu_hsvc_available = B_FALSE;
+	bcopy(&niu_hsvc, &nxgep->niu_hsvc,
+	    sizeof (hsvc_info_t));
+
+	for (i = NIU_MAJOR_HI; i > 0; i--) {
+		nxgep->niu_hsvc.hsvc_major = i;
+		for (j = NIU_MINOR_HI; j >= 0; j--) {
+			nxgep->niu_hsvc.hsvc_minor = j;
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			    "nxge_hsvc_register: %s: negotiating "
+			    "hypervisor services revision %d "
+			    "group: 0x%lx major: 0x%lx "
+			    "minor: 0x%lx",
+			    nxgep->niu_hsvc.hsvc_modname,
+			    nxgep->niu_hsvc.hsvc_rev,
+			    nxgep->niu_hsvc.hsvc_group,
+			    nxgep->niu_hsvc.hsvc_major,
+			    nxgep->niu_hsvc.hsvc_minor,
+			    nxgep->niu_min_ver));
+
+			if ((status = hsvc_register(&nxgep->niu_hsvc,
+			    &nxgep->niu_min_ver)) == 0) {
+				/* Use the supported minor */
+				nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver;
+				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				    "nxge_hsvc_register: %s: negotiated "
+				    "hypervisor services revision %d "
+				    "group: 0x%lx major: 0x%lx "
+				    "minor: 0x%lx (niu_min_ver 0x%lx)",
+				    nxgep->niu_hsvc.hsvc_modname,
+				    nxgep->niu_hsvc.hsvc_rev,
+				    nxgep->niu_hsvc.hsvc_group,
+				    nxgep->niu_hsvc.hsvc_major,
+				    nxgep->niu_hsvc.hsvc_minor,
+				    nxgep->niu_min_ver));
+
+				nxgep->niu_hsvc_available = B_TRUE;
+				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+				    "<== nxge_hsvc_register: "
+				    "NIU Hypervisor service enabled"));
+				return (DDI_SUCCESS);
+			}
+
+			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+			    "nxge_hsvc_register: %s: negotiated failed - "
+			    "try lower major number "
+			    "hypervisor services revision %d "
+			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
+			    "errno: %d",
+			    nxgep->niu_hsvc.hsvc_modname,
+			    nxgep->niu_hsvc.hsvc_rev,
+			    nxgep->niu_hsvc.hsvc_group,
+			    nxgep->niu_hsvc.hsvc_major,
+			    nxgep->niu_hsvc.hsvc_minor, status));
 		}
-		nxgep->niu_hsvc_available = B_TRUE;
-		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
-		    "NIU Hypervisor service enabled"));
-	}
-
-	return (DDI_SUCCESS);
+	}
+
+	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
+	    "nxge_hsvc_register: %s: cannot negotiate "
+	    "hypervisor services revision %d group: 0x%lx "
+	    "major: 0x%lx minor: 0x%lx errno: %d",
+	    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
+	    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
+	    niu_hsvc.hsvc_minor, status));
+
+	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
+	    "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));
+
+	return (DDI_FAILURE);
 }
 #endif
 
@@ -1144,7 +1217,10 @@
 		nxgep->niu_type = N2_NIU;
 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
 		    "nxge_map_regs: N2/NIU devname %s", devname));
-		/* get function number */
+		/*
+		 * Get function number:
+		 *  - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
+		 */
 		nxgep->function_num =
 		    (devname[strlen(devname) -1] == '1' ? 1 : 0);
 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
@@ -3949,6 +4025,8 @@
 	case NXGE_GET_TCAM:
 	case NXGE_RTRACE:
 	case NXGE_RDUMP:
+	case NXGE_RX_CLASS:
+	case NXGE_RX_HASH:
 
 		need_privilege = B_FALSE;
 		break;
@@ -3997,6 +4075,19 @@
 		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
 		break;
+	case NXGE_RX_CLASS:
+		if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
+			miocnak(wq, mp, 0, EINVAL);
+		else
+			miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
+		break;
+	case NXGE_RX_HASH:
+
+		if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
+			miocnak(wq, mp, 0, EINVAL);
+		else
+			miocack(wq, mp, sizeof (cfg_cmd_t), 0);
+		break;
 	}
 
 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
@@ -6587,11 +6678,16 @@
 		if (nxgep->niu_type == N2_NIU) {
 			hw_p->niu_type = N2_NIU;
 			hw_p->platform_type = P_NEPTUNE_NIU;
+			hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
 		} else {
 			hw_p->niu_type = NIU_TYPE_NONE;
 			hw_p->platform_type = P_NEPTUNE_NONE;
+			hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
 		}
 
+		hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
+		    hw_p->tcam_size, KM_SLEEP);
+
 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
@@ -6697,6 +6793,9 @@
 			}
 			hw_p->nxge_p[nxgep->function_num] = NULL;
 			if (!hw_p->ndevs) {
+				KMEM_FREE(hw_p->tcam,
+				    sizeof (tcam_flow_spec_t) *
+				    hw_p->tcam_size);
 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
--- a/usr/src/uts/common/io/nxge/nxge_rxdma.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_rxdma.c	Fri Dec 11 10:41:17 2009 -0800
@@ -46,6 +46,7 @@
 extern uint32_t nxge_rbr_size;
 extern uint32_t nxge_rcr_size;
 extern uint32_t	nxge_rbr_spare_size;
+extern uint16_t	nxge_rdc_buf_offset;
 
 extern uint32_t nxge_mblks_pending;
 
@@ -598,7 +599,12 @@
 	    rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
 	    rbr_p->npi_pkt_buf_size2));
 
-	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
+	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
+		rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
+		    &rdc_desc, B_TRUE);
+	else
+		rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
+		    &rdc_desc, B_FALSE);
 	if (rs != NPI_SUCCESS) {
 		return (NXGE_ERROR | rs);
 	}
@@ -2375,6 +2381,36 @@
 		return;
 	}
 
+	switch (nxge_rdc_buf_offset) {
+	case SW_OFFSET_NO_OFFSET:
+		sw_offset_bytes = 0;
+		break;
+	case SW_OFFSET_64:
+		sw_offset_bytes = 64;
+		break;
+	case SW_OFFSET_128:
+		sw_offset_bytes = 128;
+		break;
+	case SW_OFFSET_192:
+		sw_offset_bytes = 192;
+		break;
+	case SW_OFFSET_256:
+		sw_offset_bytes = 256;
+		break;
+	case SW_OFFSET_320:
+		sw_offset_bytes = 320;
+		break;
+	case SW_OFFSET_384:
+		sw_offset_bytes = 384;
+		break;
+	case SW_OFFSET_448:
+		sw_offset_bytes = 448;
+		break;
+	default:
+		sw_offset_bytes = 0;
+		break;
+	}
+
 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
 	    (buf_offset + sw_offset_bytes),
 	    (hdr_size + l2_len),
@@ -3602,7 +3638,9 @@
 	    nxgep->intr_threshold;
 
 	rcrp->full_hdr_flag = B_FALSE;
-	rcrp->sw_priv_hdr_len = 0;
+
+	rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;
+
 
 	cfga_p = &(rcrp->rcr_cfga);
 	cfgb_p = &(rcrp->rcr_cfgb);
@@ -3660,7 +3698,31 @@
 	    cfig1_p->value, cfig2_p->value));
 
 	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
-	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
+	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
+		switch (rcrp->sw_priv_hdr_len) {
+			case SW_OFFSET_NO_OFFSET:
+			case SW_OFFSET_64:
+			case SW_OFFSET_128:
+			case SW_OFFSET_192:
+				cfig2_p->bits.ldw.offset =
+				    rcrp->sw_priv_hdr_len;
+				cfig2_p->bits.ldw.offset256 = 0;
+				break;
+			case SW_OFFSET_256:
+			case SW_OFFSET_320:
+			case SW_OFFSET_384:
+			case SW_OFFSET_448:
+				cfig2_p->bits.ldw.offset =
+				    rcrp->sw_priv_hdr_len & 0x3;
+				cfig2_p->bits.ldw.offset256 = 1;
+				break;
+			default:
+				cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
+				cfig2_p->bits.ldw.offset256 = 0;
+			}
+	} else {
+		cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
+	}
 
 	rbrp->rx_rcr_p = rcrp;
 	rcrp->rx_rbr_p = rbrp;
--- a/usr/src/uts/common/io/nxge/nxge_virtual.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/nxge/nxge_virtual.c	Fri Dec 11 10:41:17 2009 -0800
@@ -1613,6 +1613,9 @@
 {
 	nxge_status_t status = NXGE_OK;
 	p_nxge_hw_list_t hw_p;
+	char **prop_val;
+	uint_t prop_len;
+	uint_t i;
 
 	NXGE_DEBUG_MSG((nxgep, VPD_CTL, " ==> nxge_get_config_properties"));
 
@@ -1717,6 +1720,30 @@
 	    "nxge_get_config_properties: software lso %d\n",
 	    nxgep->soft_lso_enable));
 
+	nxgep->niu_hw_type = NIU_HW_TYPE_DEFAULT;
+	if (nxgep->niu_type == N2_NIU) {
+		/*
+		 * For NIU, the next generation KT has
+		 * a few differences in features that the
+		 * driver needs to handle
+		 * accordingly.
+		 */
+		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
+		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+			for (i = 0; i < prop_len; i++) {
+				if ((strcmp((caddr_t)prop_val[i],
+				    KT_NIU_COMPATIBLE) == 0)) {
+					nxgep->niu_hw_type = NIU_HW_TYPE_RF;
+					NXGE_DEBUG_MSG((nxgep, VPD_CTL,
+					    "NIU type %d", nxgep->niu_hw_type));
+					break;
+				}
+			}
+		}
+
+		ddi_prop_free(prop_val);
+	}
+
 	NXGE_DEBUG_MSG((nxgep, VPD_CTL, " <== nxge_get_config_properties"));
 	return (status);
 }
--- a/usr/src/uts/common/io/pciex/pcie.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/io/pciex/pcie.c	Fri Dec 11 10:41:17 2009 -0800
@@ -240,11 +240,6 @@
 #endif /* defined(__sparc) */
 	}
 
-	if ((pcie_ari_supported(dip) == PCIE_ARI_FORW_SUPPORTED) &&
-	    (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_DISABLED))
-		(void) pcicfg_configure(dip, 0, PCICFG_ALL_FUNC,
-		    PCICFG_FLAG_ENABLE_ARI);
-
 	return (DDI_SUCCESS);
 }
 
--- a/usr/src/uts/common/sys/cpc_impl.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/cpc_impl.h	Fri Dec 11 10:41:17 2009 -0800
@@ -56,9 +56,10 @@
 #define	CPC_COUNT_USER		0x2
 #define	CPC_COUNT_SYSTEM	0x4
 #define	CPC_COUNT_HV		0x8
+#define	CPC_COUNT_SAMPLE_MODE	0x10
 
 #define	KCPC_REQ_ALL_FLAGS	(CPC_OVF_NOTIFY_EMT | CPC_COUNT_USER | \
-		CPC_COUNT_SYSTEM | CPC_COUNT_HV)
+		CPC_COUNT_SYSTEM | CPC_COUNT_HV | CPC_COUNT_SAMPLE_MODE)
 #define	KCPC_REQ_VALID_FLAGS(flags) \
 		(((flags) | KCPC_REQ_ALL_FLAGS) == KCPC_REQ_ALL_FLAGS)
 
--- a/usr/src/uts/common/sys/crypto/common.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/crypto/common.h	Fri Dec 11 10:41:17 2009 -0800
@@ -207,6 +207,7 @@
 #define	SUN_CKM_AES_CCM			"CKM_AES_CCM"
 #define	SUN_CKM_AES_GCM			"CKM_AES_GCM"
 #define	SUN_CKM_AES_GMAC		"CKM_AES_GMAC"
+#define	SUN_CKM_AES_CFB128		"CKM_AES_CFB128"
 #define	SUN_CKM_RC4			"CKM_RC4"
 #define	SUN_CKM_RSA_PKCS		"CKM_RSA_PKCS"
 #define	SUN_CKM_RSA_X_509		"CKM_RSA_X_509"
@@ -454,6 +455,8 @@
 	crypto_version_t	ei_hardware_version;
 	crypto_version_t	ei_firmware_version;
 	uchar_t			ei_time[CRYPTO_EXT_SIZE_TIME];
+	int			ei_hash_max_input_len;
+	int			ei_hmac_max_input_len;
 } crypto_provider_ext_info_t;
 
 typedef uint_t		crypto_session_id_t;
--- a/usr/src/uts/common/sys/crypto/impl.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/crypto/impl.h	Fri Dec 11 10:41:17 2009 -0800
@@ -204,6 +204,8 @@
  *			and other internal flags defined above.
  * pd_hash_limit:	Maximum data size that hash mechanisms of this provider
  * 			can support.
+ * pd_hmac_limit:	Maximum data size that HMAC mechanisms of this provider
+ * 			can support.
  * pd_kcf_prov_handle:	KCF-private handle assigned by KCF
  * pd_prov_id:		Identification # assigned by KCF to provider
  * pd_kstat:		kstat associated with the provider
@@ -232,6 +234,7 @@
 	char				*pd_description;
 	uint_t				pd_flags;
 	uint_t				pd_hash_limit;
+	uint_t				pd_hmac_limit;
 	crypto_kcf_provider_handle_t	pd_kcf_prov_handle;
 	crypto_provider_id_t		pd_prov_id;
 	kstat_t				*pd_kstat;
@@ -590,6 +593,7 @@
 #define	KCF_PROV_NOSTORE_KEY_OPS(pd)	\
 	((pd)->pd_ops_vector->co_nostore_key_ops)
 #define	KCF_PROV_FIPS140_OPS(pd)	((pd)->pd_ops_vector->co_fips140_ops)
+#define	KCF_PROV_PROVMGMT_OPS(pd)	((pd)->pd_ops_vector->co_provider_ops)
 
 /*
  * Wrappers for crypto_control_ops(9S) entry points.
--- a/usr/src/uts/common/sys/crypto/ioctl.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/crypto/ioctl.h	Fri Dec 11 10:41:17 2009 -0800
@@ -131,10 +131,13 @@
 	boolean_t fl_init_pin;
 	boolean_t fl_set_pin;
 
-	boolean_t prov_is_limited;
+	boolean_t prov_is_hash_limited;
 	uint32_t prov_hash_threshold;
 	uint32_t prov_hash_limit;
 
+	boolean_t prov_is_hmac_limited;
+	uint32_t prov_hmac_limit;
+
 	int total_threshold_count;
 	fl_mechs_threshold_t	fl_threshold[MAX_NUM_THRESHOLD];
 } crypto_function_list_t;
--- a/usr/src/uts/common/sys/crypto/spi.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/crypto/spi.h	Fri Dec 11 10:41:17 2009 -0800
@@ -668,13 +668,6 @@
 #define	cm_keysize_unit	cm_mech_flags
 
 /*
- * The following is used by a provider that sets
- * CRYPTO_HASH_NO_UPDATE. It needs to specify the maximum
- * input data size it can digest in this field.
- */
-#define	cm_max_input_length	cm_max_key_length
-
-/*
  * crypto_kcf_provider_handle_t is a handle allocated by the kernel.
  * It is returned after the provider registers with
  * crypto_register_provider(), and must be specified by the provider
@@ -730,9 +723,18 @@
 #define	CRYPTO_HIDE_PROVIDER		0x00000001
 /*
  * provider can not do multi-part digest (updates) and has a limit
- * on maximum input data that it can digest.
+ * on maximum input data that it can digest. The provider sets
+ * this value in crypto_provider_ext_info_t by implementing
+ * the ext_info entry point in the co_provider_ops vector.
  */
 #define	CRYPTO_HASH_NO_UPDATE		0x00000002
+/*
+ * provider can not do multi-part HMAC (updates) and has a limit
+ * on maximum input data that it can hmac. The provider sets
+ * this value in crypto_provider_ext_info_t by implementing
+ * the ext_info entry point in the co_provider_ops vector.
+ */
+#define	CRYPTO_HMAC_NO_UPDATE		0x00000008
 
 /* provider can handle the request without returning a CRYPTO_QUEUED */
 #define	CRYPTO_SYNCHRONOUS		0x00000004
--- a/usr/src/uts/common/sys/nxge/nxge.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge.h	Fri Dec 11 10:41:17 2009 -0800
@@ -65,6 +65,9 @@
 #define	NXGE_PUT_TCAM		(NXGE_IOC|30)
 #define	NXGE_INJECT_ERR		(NXGE_IOC|40)
 
+#define	NXGE_RX_CLASS		(NXGE_IOC|41)
+#define	NXGE_RX_HASH		(NXGE_IOC|42)
+
 #define	NXGE_OK			0
 #define	NXGE_ERROR		0x40000000
 #define	NXGE_DDI_FAILED		0x20000000
@@ -93,6 +96,9 @@
 
 #define	NXGE_CHECK_TIMER	(5000)
 
+/* KT/NIU OBP creates a compatible property for KT */
+#define	KT_NIU_COMPATIBLE	"SUNW,niusl-kt"
+
 typedef enum {
 	param_instance,
 	param_main_instance,
@@ -794,6 +800,15 @@
 	nxge_ring_group_t	rx_hio_groups[NXGE_MAX_RDC_GROUPS];
 
 	nxge_share_handle_t	shares[NXGE_MAX_VRS];
+
+	/*
+	 * KT-NIU:
+	 *	KT family will have up to 4 NIUs per system.
+	 *	Differences between N2/NIU and KT/NIU:
+	 *		SerDes, Hypervisor interfaces,
+	 *		additional NIU classification features.
+	 */
+	niu_hw_type_t		niu_hw_type;
 };
 
 /*
--- a/usr/src/uts/common/sys/nxge/nxge_common.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_common.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -499,6 +499,14 @@
 #define	FZC_READ_ONLY			0x04
 } nxge_part_cfg_t, *p_nxge_part_cfg_t;
 
+typedef struct nxge_usr_l3_cls {
+	uint64_t		cls;
+	uint16_t		tcam_ref_cnt;
+	uint8_t			pid;
+	uint8_t			flow_pkt_type;
+	uint8_t			valid;
+} nxge_usr_l3_cls_t, *p_nxge_usr_l3_cls_t;
+
 typedef struct nxge_hw_list {
 	struct nxge_hw_list 	*next;
 	nxge_os_mutex_t 	nxge_cfg_lock;
@@ -536,6 +544,10 @@
 	uint32_t		platform_type;
 	uint8_t			xcvr_addr[NXGE_MAX_PORTS];
 	uintptr_t		hio;
+	void			*tcam;
+	uint32_t 		tcam_size;
+	uint64_t		tcam_l2_prog_cls[NXGE_L2_PROG_CLS];
+	nxge_usr_l3_cls_t	tcam_l3_prog_cls[NXGE_L3_PROG_CLS];
 } nxge_hw_list_t, *p_nxge_hw_list_t;
 
 #ifdef	__cplusplus
--- a/usr/src/uts/common/sys/nxge/nxge_defs.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_defs.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -225,7 +225,12 @@
 #define	SW_OFFSET_NO_OFFSET		0
 #define	SW_OFFSET_64			1	/* 64 bytes */
 #define	SW_OFFSET_128			2	/* 128 bytes */
-#define	SW_OFFSET_INVALID		3
+/* The following additional offsets are defined for Neptune-L and RF-NIU */
+#define	SW_OFFSET_192			3
+#define	SW_OFFSET_256			4
+#define	SW_OFFSET_320			5
+#define	SW_OFFSET_384			6
+#define	SW_OFFSET_448			7
 
 #define	TDC_DEFAULT_MAX		8192
 /*
@@ -336,8 +341,11 @@
 
 #define	TCAM_FLOW_KEY_MAX_CLASS		12
 #define	TCAM_L3_MAX_USER_CLASS		4
+#define	TCAM_MAX_ENTRY			256
 #define	TCAM_NIU_TCAM_MAX_ENTRY		128
 #define	TCAM_NXGE_TCAM_MAX_ENTRY	256
+#define	NXGE_L2_PROG_CLS		2
+#define	NXGE_L3_PROG_CLS		4
 
 
 
--- a/usr/src/uts/common/sys/nxge/nxge_fflp.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_fflp.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef	_SYS_NXGE_NXGE_FFLP_H
 #define	_SYS_NXGE_NXGE_FFLP_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -131,6 +129,7 @@
 	tcam_entry_t tce;
 	uint64_t flags;
 	uint64_t user_info;
+	uint8_t valid;
 } tcam_flow_spec_t, *p_tcam_flow_spec_t;
 
 
@@ -204,6 +203,7 @@
 	nxge_os_mutex_t		fcram_lock;
 	nxge_os_mutex_t		hash_lock[MAX_PARTITION];
 	uint32_t 		tcam_size;
+	uint32_t		tcam_entry_cnt;
 	uint32_t 		state;
 #define	NXGE_FFLP_HW_RESET	0x1
 #define	NXGE_FFLP_HW_INIT	0x2
@@ -211,8 +211,15 @@
 #define	NXGE_FFLP_FCRAM_PART	0x80000000
 	p_nxge_fflp_stats_t	fflp_stats;
 
-	tcam_flow_spec_t    *tcam_entries;
-	uint8_t		    tcam_location;
+	tcam_flow_spec_t    	*tcam_entries;
+	uint8_t			tcam_top;
+	uint8_t			tcam_location;
+	uint64_t		tcam_l2_prog_cls[NXGE_L2_PROG_CLS];
+	uint64_t		tcam_l3_prog_cls[NXGE_L3_PROG_CLS];
+	uint64_t		tcam_key[12];
+	uint64_t		flow_key[12];
+	uint16_t		tcam_l3_prog_cls_refcnt[NXGE_L3_PROG_CLS];
+	uint8_t			tcam_l3_prog_cls_pid[NXGE_L3_PROG_CLS];
 #define	NXGE_FLOW_NO_SUPPORT  0x0
 #define	NXGE_FLOW_USE_TCAM    0x1
 #define	NXGE_FLOW_USE_FCRAM   0x2
--- a/usr/src/uts/common/sys/nxge/nxge_fflp_hw.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_fflp_hw.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -167,6 +167,29 @@
 #endif
 	} bits;
 } tcam_class_prg_ip_t, *p_tcam_class_prg_ip_t;
+
+/*
+ * New fields added to the L3 programmable class register for RF-NIU
+ * and Neptune-L.
+ */
+#define	L3_UCLS_TOS_SH		0
+#define	L3_UCLS_TOS_MSK		0xff
+#define	L3_UCLS_TOSM_SH		8
+#define	L3_UCLS_TOSM_MSK	0xff
+#define	L3_UCLS_PID_SH		16
+#define	L3_UCLS_PID_MSK		0xff
+#define	L3_UCLS_VALID_SH	25
+#define	L3_UCLS_VALID_MSK	0x01
+#define	L3_UCLS_L4B23_SEL_SH	26
+#define	L3_UCLS_L4B23_SEL_MSK	0x01
+#define	L3_UCLS_L4B23_VAL_SH	27
+#define	L3_UCLS_L4B23_VAL_MSK	0xffff
+#define	L3_UCLS_L4B0_MASK_SH	43
+#define	L3_UCLS_L4B0_MASK_MSK	0xff
+#define	L3_UCLS_L4B0_VAL_SH	51
+#define	L3_UCLS_L4B0_VAL_MSK	0xff
+#define	L3_UCLS_L4_MODE_SH	59
+#define	L3_UCLS_L4_MODE_MSK	0x01
 /* define the classes which use the above structure */
 
 typedef enum fflp_tcam_class {
@@ -192,9 +215,11 @@
     TCAM_CLASS_DUMMY_13,
     TCAM_CLASS_DUMMY_14,
     TCAM_CLASS_DUMMY_15,
-    TCAM_CLASS_MAX
+    TCAM_CLASS_IPV6_FRAG = 0x1F
 } tcam_class_t;
 
+#define	TCAM_CLASS_MAX	TCAM_CLASS_IPV6_FRAG
+
 /*
  * Specify how to build TCAM key for L3
  * IP Classes. Both User configured and
@@ -867,6 +892,13 @@
 #define		FFLP_FLOW_KEY_IP6_UDP_REG		(FZC_FFLP + 0x40048)
 #define		FFLP_FLOW_KEY_IP6_AH_ESP_REG	(FZC_FFLP + 0x40050)
 #define		FFLP_FLOW_KEY_IP6_SCTP_REG		(FZC_FFLP + 0x40058)
+/*
+ * New FLOW KEY register added for IPV6 Fragments for RF-NIU
+ * and Neptune-L.
+ */
+#define		FFLP_FLOW_KEY_IP6_FRAG_REG		(FZC_FFLP + 0x400B0)
+
+#define	FL_KEY_USR_L4XOR_MSK	0x03ff
 
 typedef union _flow_class_key_ip_t {
     uint64_t value;
@@ -876,7 +908,12 @@
 #endif
 		struct {
 #ifdef _BIT_FIELDS_HTOL
-			uint32_t rsrvd2:22;
+			uint32_t rsrvd2:10;
+/* These bits added for L3 programmable classes in RF-NIU and Neptune-L */
+			uint32_t l4_xor:10;
+			uint32_t l4_mode:1;
+/* This bit added for SNORT support in RF-NIU and Neptune-L */
+			uint32_t sym:1;
 			uint32_t port:1;
 			uint32_t l2da:1;
 			uint32_t vlan:1;
@@ -894,7 +931,10 @@
 			uint32_t vlan:1;
 			uint32_t l2da:1;
 			uint32_t port:1;
-			uint32_t rsrvd2:22;
+			uint32_t sym:1;
+			uint32_t l4_mode:1;
+			uint32_t l4_xor:10;
+			uint32_t rsrvd2:10;
 #endif
 		} ldw;
 #ifndef _BIG_ENDIAN
@@ -903,7 +943,6 @@
 	} bits;
 } flow_class_key_ip_t, *p_flow_class_key_ip_t;
 
-
 #define		FFLP_H1POLY_REG		(FZC_FFLP + 0x40060)
 
 
@@ -1379,7 +1418,11 @@
 
 
 typedef struct _flow_key_cfg_t {
-    uint32_t rsrvd:23;
+    uint32_t rsrvd:11;
+/* The following 3 bit fields added for RF-NIU and Neptune-L */
+    uint32_t l4_xor_sel:10;
+    uint32_t use_l4_md:1;
+    uint32_t use_sym:1;
     uint32_t use_portnum:1;
     uint32_t use_l2da:1;
     uint32_t use_vlan:1;
--- a/usr/src/uts/common/sys/nxge/nxge_flow.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_flow.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -38,6 +38,7 @@
 	in_addr_t  ip4dst;
 	in_port_t  psrc;
 	in_port_t  pdst;
+	uint8_t	   tos;
 } tcpip4_spec_t;
 
 typedef struct tcpip6_spec_s {
@@ -45,6 +46,7 @@
 	struct in6_addr ip6dst;
 	in_port_t  psrc;
 	in_port_t  pdst;
+	uint8_t	   tos;
 } tcpip6_spec_t;
 
 typedef struct udpip4_spec_s {
@@ -52,6 +54,7 @@
 	in_addr_t  ip4dst;
 	in_port_t  psrc;
 	in_port_t  pdst;
+	uint8_t	   tos;
 } udpip4_spec_t;
 
 typedef struct udpip6_spec_s {
@@ -59,18 +62,21 @@
 	struct in6_addr ip6dst;
 	in_port_t  psrc;
 	in_port_t  pdst;
+	uint8_t	   tos;
 } udpip6_spec_t;
 
 typedef struct ahip4_spec_s {
 	in_addr_t  ip4src;
 	in_addr_t  ip4dst;
 	uint32_t   spi;
+	uint8_t	   tos;
 } ahip4_spec_t;
 
 typedef struct ahip6_spec_s {
 	struct in6_addr ip6src;
 	struct in6_addr ip6dst;
 	uint32_t   spi;
+	uint8_t	   tos;
 } ahip6_spec_t;
 
 typedef ahip4_spec_t espip4_spec_t;
@@ -96,18 +102,31 @@
 } ether_spec_t;
 
 
+#define	FSPEC_IP4	1
+#define	FSPEC_IP6	2
+
 typedef struct ip_user_spec_s {
-	uint8_t    id;
-	uint8_t    ip_ver;
-	uint8_t    proto;
-	uint8_t    tos_mask;
-	uint8_t    tos;
+	uint32_t	ip4src;
+	uint32_t	ip4dst;
+	uint32_t	l4_4_bytes;
+	uint8_t    	tos;
+	uint8_t    	ip_ver;
+	uint8_t    	proto;
 } ip_user_spec_t;
 
+typedef struct ip6_frag_spec_s {
+	struct in6_addr ip6src;
+	struct in6_addr ip6dst;
+	uint32_t	l4_4_bytes;
+	uint8_t    	tos;
+	uint8_t    	proto;	/* should be 44 */
+} ip6_frag_spec_t;
+
+
 typedef ether_spec_t arpip_spec_t;
 typedef ether_spec_t ether_user_spec_t;
 
-typedef struct flow_spec_s {
+struct flow_spec_s {
 	uint32_t  flow_type;
 	union {
 		tcpip4_spec_t tcpip4spec;
@@ -123,9 +142,12 @@
 		rawip6_spec_t rawip6spec;
 		ether_spec_t  etherspec;
 		ip_user_spec_t  ip_usr_spec;
+		ip6_frag_spec_t  ip6_frag_spec;
 		uint8_t		hdata[64];
 	} uh, um; /* entry, mask */
-} flow_spec_t;
+} __attribute__((packed));
+
+typedef struct flow_spec_s flow_spec_t;
 
 #define	FSPEC_TCPIP4	0x1	/* TCP/IPv4 Flow */
 #define	FSPEC_TCPIP6	0x2	/* TCP/IPv6 */
@@ -136,13 +158,14 @@
 #define	FSPEC_AHIP6	0x7	/* AH/IP6   */
 #define	FSPEC_ESPIP4	0x8	/* ESP/IP4  */
 #define	FSPEC_ESPIP6	0x9	/* ESP/IP6  */
-#define	FSPEC_SCTPIP4	0xA	/* ESP/IP4  */
-#define	FSPEC_SCTPIP6	0xB	/* ESP/IP6  */
-#define	FSPEC_RAW4	0xC	/* RAW/IP4  */
-#define	FSPEC_RAW6	0xD	/* RAW/IP6  */
-#define	FSPEC_ETHER	0xE	/* ETHER Programmable  */
-#define	FSPEC_IP_USR	0xF	/* IP Programmable  */
-#define	FSPEC_HDATA	0x10	/* Pkt Headers eth-da,sa,etype,ip,tcp(Bitmap) */
+#define	FSPEC_SCTPIP4	0xA	/* SCTP/IP4  */
+#define	FSPEC_SCTPIP6	0xB	/* SCTP/IP6  */
+#define	FSPEC_IP6FRAG	0xC	/* IPv6 Fragments */
+#define	FSPEC_RAW4	0xD	/* RAW/IP4  */
+#define	FSPEC_RAW6	0xE	/* RAW/IP6  */
+#define	FSPEC_ETHER	0xF	/* ETHER Programmable  */
+#define	FSPEC_IP_USR	0x10	/* IP Programmable  */
+#define	FSPEC_HDATA	0x11	/* Pkt Headers eth-da,sa,etype,ip,tcp(Bitmap) */
 
 #define	TCAM_IPV6_ADDR(m32, ip6addr) {		\
 		m32[0] = ip6addr.S6_addr32[0]; \
@@ -151,8 +174,22 @@
 		m32[3] = ip6addr.S6_addr32[3]; \
 	}
 
+#define	FSPEC_IPV6_ADDR(ip6addr, m32) {		\
+	ip6addr.S6_addr32[0] = m32[0];		\
+	ip6addr.S6_addr32[1] = m32[1];		\
+	ip6addr.S6_addr32[2] = m32[2];		\
+	ip6addr.S6_addr32[3] = m32[3];		\
+}
+
 #define	TCAM_IPV4_ADDR(m32, ip4addr) (m32 = ip4addr)
+#define	FSPEC_IPV4_ADDR(ip4addr, m32) (ip4addr = m32)
+
 #define	TCAM_IP_PORTS(port32, dp, sp)	  (port32 = dp | (sp << 16))
+#define	FSPEC_IP_PORTS(dp, sp, port32) {	\
+	dp = port32 & 0xffff;			\
+	sp = port32 >> 16;			\
+}
+
 #define	TCAM_IP_CLASS(key, mask, class)	  {		\
 		key = class; \
 		mask = 0x1f; \
@@ -163,12 +200,102 @@
 		mask = 0xff; \
 	}
 
-
-typedef struct flow_resource_s {
+struct flow_resource_s {
 	uint64_t channel_cookie;
 	uint64_t flow_cookie;
+	uint64_t location;
 	flow_spec_t flow_spec;
-} flow_resource_t;
+} __attribute__((packed));
+
+typedef struct flow_resource_s flow_resource_t;
+
+/* ioctl data structure and cmd types for configuring rx classification */
+
+#define	NXGE_RX_CLASS_GCHAN	0x01
+#define	NXGE_RX_CLASS_GRULE_CNT	0x02
+#define	NXGE_RX_CLASS_GRULE	0x03
+#define	NXGE_RX_CLASS_GRULE_ALL	0x04
+#define	NXGE_RX_CLASS_RULE_DEL	0x05
+#define	NXGE_RX_CLASS_RULE_INS	0x06
+
+#define	NXGE_PKT_DISCARD	0xffffffffffffffffULL
+
+struct rx_class_cfg_s {
+	uint32_t cmd;
+	uint32_t data; /* the rule DB size or the # rx rings */
+	uint64_t rule_cnt;
+	uint32_t rule_locs[256];
+	flow_resource_t fs;
+} __attribute__((packed));
+
+typedef struct rx_class_cfg_s rx_class_cfg_t;
+
+/*
+ * ioctl data structure and cmd types for configuring rx hash
+ * for IP tunneled traffic and symmetric mode.
+ */
+
+#define	NXGE_IPTUN_CFG_ADD_CLS	0x07
+#define	NXGE_IPTUN_CFG_SET_HASH	0x08
+#define	NXGE_IPTUN_CFG_DEL_CLS	0x09
+#define	NXGE_IPTUN_CFG_GET_CLS	0x0a
+#define	NXGE_CLS_CFG_SET_SYM	0x0b
+#define	NXGE_CLS_CFG_GET_SYM	0x0c
+
+#define	IPTUN_PKT_IPV4		1
+#define	IPTUN_PKT_IPV6		2
+#define	IPTUN_PKT_GRE		3
+#define	IPTUN_PKT_GTP		4
+#define	OTHER_USR_PKT		5
+
+#define	SEL_L4B_0_3		0x0001
+#define	SEL_L4B_4_7		0x0002
+#define	SEL_L4B_8_11		0x0004
+#define	SEL_L4B_12_15		0x0008
+#define	SEL_L4B_16_19		0x0010
+#define	SEL_L4B_20_23		0x0020
+#define	SEL_L4B_24_27		0x0040
+#define	SEL_L4B_28_31		0x0080
+#define	SEL_L4B_32_35		0x0100
+#define	SEL_L4B_36_39		0x0200
+
+#define	HASH_IFPORT		0x0001
+#define	HASH_L2DA		0x0002
+#define	HASH_VLAN		0x0004
+#define	HASH_IPSA		0x0008
+#define	HASH_IPDA		0x0010
+#define	HASH_L3PROTO		0x0020
+
+#define	CLS_TCPV4		0x08
+#define	CLS_UDPV4		0x09
+#define	CLS_AHESPV4		0x0A
+#define	CLS_SCTPV4		0x0B
+#define	CLS_TCPV6		0x0C
+#define	CLS_UDPV6		0x0D
+#define	CLS_AHESPV6		0x0E
+#define	CLS_SCTPV6		0x0F
+#define	CLS_IPV6FRAG		0x1F
+
+struct _iptun_cfg {
+	uint8_t		in_pkt_type;
+	uint8_t		l4b0_val;
+	uint8_t		l4b0_mask;
+	uint8_t		l4b23_sel;
+	uint16_t	l4b23_val;
+	uint16_t	l4xor_sel;
+	uint8_t		hash_flags;
+} __attribute__((packed));
+
+typedef struct _iptun_cfg iptun_cfg_t;
+
+struct _cfg_cmd {
+	uint16_t cmd;
+	uint8_t sym;
+	uint8_t	class_id;
+	iptun_cfg_t	iptun_cfg;
+} __attribute__((packed));
+
+typedef struct _cfg_cmd cfg_cmd_t;
 
 #ifdef	__cplusplus
 }
--- a/usr/src/uts/common/sys/nxge/nxge_hio.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_hio.h	Fri Dec 11 10:41:17 2009 -0800
@@ -58,12 +58,19 @@
 typedef hv_rv_t (*vr_unassign)(uint32_t);
 typedef hv_rv_t (*vr_getinfo)(uint32_t, uint64_t *, uint64_t *);
 
+/* HV 2.0 API group functions */
+typedef hv_rv_t (*vr_cfgh_assign)(uint64_t, uint64_t, uint64_t, uint32_t *);
+typedef hv_rv_t (*vrlp_cfgh_conf)(uint64_t, uint64_t, uint64_t, uint64_t,
+    uint64_t);
+typedef hv_rv_t (*vrlp_cfgh_info)(uint64_t, uint64_t, uint64_t, uint64_t *,
+    uint64_t *);
+
 
 typedef struct {
-	vr_assign	assign;
+	vr_assign	assign;		/* HV Major 1 interface */
+	vr_cfgh_assign	cfgh_assign;	/* HV Major 2 interface */
 	vr_unassign	unassign;
 	vr_getinfo	getinfo;
-
 } nxhv_vr_fp_t;
 
 typedef hv_rv_t (*vrlp_conf)(uint64_t, uint64_t, uint64_t, uint64_t);
@@ -82,9 +89,10 @@
 	dc_getstate	getstate;
 	dc_get_map	get_map;
 
-	vrlp_conf	lp_conf;
-	vrlp_info	lp_info;
-
+	vrlp_conf	lp_conf;	/* HV Major 1 interface */
+	vrlp_info	lp_info;	/* HV Major 1 interface */
+	vrlp_cfgh_conf	lp_cfgh_conf;	/* HV Major 2 interface */
+	vrlp_cfgh_info	lp_cfgh_info;	/* HV Major 2 interface */
 	dc_getinfo	getinfo;
 } nxhv_dc_fp_t;
 
--- a/usr/src/uts/common/sys/nxge/nxge_impl.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_impl.h	Fri Dec 11 10:41:17 2009 -0800
@@ -32,9 +32,17 @@
 
 /*
  * NIU HV API version definitions.
+ *
+ * If additional major (HV API) is to be supported,
+ * please increment NIU_MAJOR_HI.
+ * If additional minor # is to be supported,
+ * please increment NIU_MINOR_HI.
  */
+#define	NIU_MAJOR_HI		2
+#define	NIU_MINOR_HI		1
 #define	NIU_MAJOR_VER		1
 #define	NIU_MINOR_VER		1
+#define	NIU_MAJOR_VER_2		2
 
 #if defined(sun4v)
 
@@ -430,6 +438,17 @@
 } niu_type_t;
 
 /*
+ * The niu_hw_type is for non-PHY related functions
+ * designed on various versions of NIU chips (i.e. RF/NIU has
+ * additional classification features and communicates with
+ * a different SerDes than N2/NIU).
+ */
+typedef enum {
+	NIU_HW_TYPE_DEFAULT = 0,	/* N2/NIU */
+	NIU_HW_TYPE_RF = 1,		/* RF/NIU */
+} niu_hw_type_t;
+
+/*
  * P_NEPTUNE_GENERIC:
  *	The cover-all case for Neptune (as opposed to NIU) where we do not
  *	care the exact platform as we do not do anything that is platform
@@ -849,6 +868,9 @@
 nxge_status_t nxge_fflp_init_hostinfo(p_nxge_t);
 
 void nxge_handle_tcam_fragment_bug(p_nxge_t);
+int nxge_rxclass_ioctl(p_nxge_t, queue_t *, mblk_t *);
+int nxge_rxhash_ioctl(p_nxge_t, queue_t *, mblk_t *);
+
 nxge_status_t nxge_fflp_hw_reset(p_nxge_t);
 nxge_status_t nxge_fflp_handle_sys_errors(p_nxge_t);
 nxge_status_t nxge_zcp_handle_sys_errors(p_nxge_t);
@@ -1125,6 +1147,26 @@
     uint64_t pgidx, uint64_t *raddr, uint64_t *size);
 #pragma weak	hv_niu_vrtx_logical_page_info
 
+uint64_t hv_niu_cfgh_rx_logical_page_conf(uint64_t, uint64_t, uint64_t,
+	uint64_t, uint64_t);
+#pragma weak	hv_niu_cfgh_rx_logical_page_conf
+
+uint64_t hv_niu_cfgh_rx_logical_page_info(uint64_t, uint64_t, uint64_t,
+	uint64_t *, uint64_t *);
+#pragma weak	hv_niu_cfgh_rx_logical_page_info
+
+uint64_t hv_niu_cfgh_tx_logical_page_conf(uint64_t, uint64_t, uint64_t,
+	uint64_t, uint64_t);
+#pragma weak	hv_niu_cfgh_tx_logical_page_conf
+
+uint64_t hv_niu_cfgh_tx_logical_page_info(uint64_t, uint64_t, uint64_t,
+	uint64_t *, uint64_t *);
+#pragma weak	hv_niu_cfgh_tx_logical_page_info
+
+uint64_t hv_niu_cfgh_vr_assign(uint64_t, uint64_t vridx, uint64_t ldc_id,
+	uint32_t *cookie);
+#pragma weak	hv_niu_cfgh_vr_assign
+
 //
 // NIU-specific interrupt API
 //
--- a/usr/src/uts/common/sys/nxge/nxge_n2_esr_hw.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_n2_esr_hw.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _SYS_NXGE_NXGE_N2_ESR_HW_H
 #define	_SYS_NXGE_NXGE_N2_ESR_HW_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -356,6 +354,324 @@
 #define	TESTCFG_INNER_CML_DIS_LOOPBACK	0x2
 #define	TESTCFG_INNER_CML_EN_LOOOPBACK	0x3
 
+/*
+ * Definitions for TI WIZ7c2xxn5x1 Macro Family (KT/NIU).
+ */
+
+/* PLL_CFG: PLL Configuration Low 16-bit word */
+typedef	union _k_esr_ti_cfgpll_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res2		: 1;
+		uint16_t clkbyp		: 2;
+		uint16_t lb		: 2;
+		uint16_t res1		: 1;
+		uint16_t vrange		: 1;
+		uint16_t divclken	: 1;
+		uint16_t mpy		: 7;
+		uint16_t enpll		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t enpll		: 1;
+		uint16_t mpy		: 7;
+		uint16_t divclken	: 1;
+		uint16_t vrange		: 1;
+		uint16_t res1		: 1;
+		uint16_t lb		: 2;
+		uint16_t clkbyp		: 2;
+		uint16_t res2		: 1;
+#endif
+	} bits;
+} k_esr_ti_cfgpll_l_t;
+
+/* PLL Configurations */
+#define	K_CFGPLL_ENABLE_PLL		1
+#define	K_CFGPLL_MPY_4X			0x10
+#define	K_CFGPLL_MPY_5X			0x14
+#define	K_CFGPLL_MPY_6X			0x18
+#define	K_CFGPLL_MPY_8X			0x20
+#define	K_CFGPLL_MPY_8P25X		0x21
+#define	K_CFGPLL_MPY_10X		0x28
+#define	K_CFGPLL_MPY_12X		0x30
+#define	K_CFGPLL_MPY_12P5X		0x32
+#define	K_CFGPLL_MPY_15X		0x3c
+#define	K_CFGPLL_MPY_16X		0x40
+#define	K_CFGPLL_MPY_16P5X		0x42
+#define	K_CFGPLL_MPY_20X		0x50
+#define	K_CFGPLL_MPY_22X		0x58
+#define	K_CFGPLL_MPY_25X		0x64
+#define	K_CFGPLL_ENABLE_DIVCLKEN	0x100
+
+/* PLL_STS */
+typedef	union _k_esr_ti_pll_sts {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res2		: 12;
+		uint16_t res1		: 2;
+		uint16_t divclk		: 1;
+		uint16_t lock		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t lock		: 1;
+		uint16_t divclk		: 1;
+		uint16_t res1		: 2;
+		uint16_t res2		: 12;
+#endif
+	} bits;
+} k_esr_ti_pll_sts_t;
+
+/* TEST_CFG */
+typedef	union _kt_esr_ti_testcfg {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res		: 7;
+		uint16_t testpatt2	: 3;
+		uint16_t testpatt1	: 3;
+		uint16_t enbspt		: 1;
+		uint16_t enbsrx		: 1;
+		uint16_t enbstx		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t enbstx		: 1;
+		uint16_t enbsrx		: 1;
+		uint16_t enbspt		: 1;
+		uint16_t testpatt1	: 3;
+		uint16_t testpatt2	: 3;
+		uint16_t res		: 7;
+#endif
+	} bits;
+} k_esr_ti_testcfg_t;
+
+#define	K_TESTCFG_ENBSTX		0x1
+#define	K_TESTCFG_ENBSRX		0x2
+#define	K_TESTCFG_ENBSPT		0x4
+
+/* TX_CFG: Tx Configuration Low 16-bit word */
+
+typedef	union _k_esr_ti_cfgtx_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t de		: 3;
+		uint16_t swing		: 4;
+		uint16_t cm		: 1;
+		uint16_t invpair	: 1;
+		uint16_t rate		: 2;
+		uint16_t buswwidth	: 4;
+		uint16_t entx		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t entx		: 1;
+		uint16_t buswwidth	: 4;
+		uint16_t rate		: 2;
+		uint16_t invpair	: 1;
+		uint16_t cm		: 1;
+		uint16_t swing		: 4;
+		uint16_t de		: 3;
+#endif
+	} bits;
+} k_esr_ti_cfgtx_l_t;
+
+/* Tx Configuration High 16-bit word */
+
+typedef	union _k_esr_ti_cfgtx_h {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res3		: 1;
+		uint16_t bstx		: 1;
+		uint16_t res2		: 1;
+		uint16_t loopback	: 2;
+		uint16_t rdtct		: 2;
+		uint16_t enidl		: 1;
+		uint16_t rsync		: 1;
+		uint16_t msync		: 1;
+		uint16_t res1		: 4;
+		uint16_t de		: 2;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t de		: 2;
+		uint16_t res1		: 4;
+		uint16_t msync		: 1;
+		uint16_t rsync		: 1;
+		uint16_t enidl		: 1;
+		uint16_t rdtct		: 2;
+		uint16_t loopback	: 2;
+		uint16_t res2		: 1;
+		uint16_t bstx		: 1;
+		uint16_t res3		: 1;
+#endif
+	} bits;
+} k_esr_ti_cfgtx_h_t;
+
+/* Transmit Configurations (TBD) */
+#define	K_CFGTX_ENABLE_TX		0x1
+#define	K_CFGTX_ENABLE_MSYNC		0x1
+
+#define	K_CFGTX_BUSWIDTH_10BIT		0
+#define	K_CFGTX_BUSWIDTH_8BIT		1
+#define	K_CFGTX_RATE_FULL		0
+#define	K_CFGTX_RATE_HALF		0x1
+#define	K_CFGTX_RATE_QUAD		2
+#define	K_CFGTX_SWING_125MV		0
+#define	K_CFGTX_SWING_250MV		1
+#define	K_CFGTX_SWING_500MV		2
+#define	K_CFGTX_SWING_625MV		3
+#define	K_CFGTX_SWING_750MV		4
+#define	K_CFGTX_SWING_1000MV		5
+#define	K_CFGTX_SWING_1250MV		6
+#define	K_CFGTX_SWING_1375MV		7
+#define	K_CFGTX_SWING_2000MV		0xf
+#define	K_CFGTX_DE_0			0
+#define	K_CFGTX_DE_4P76			1
+#define	K_CFGTX_DE_9P52			2
+#define	K_CFGTX_DE_14P28		3
+#define	K_CFGTX_DE_19P04		4
+#define	K_CFGTX_DE_23P8			5
+#define	K_CFGTX_DE_28P56		6
+#define	K_CFGTX_DE_33P32		7
+#define	K_CFGTX_DIS_LOOPBACK		0x0
+#define	K_CFGTX_BUMP_PAD_LOOPBACK	0x1
+#define	K_CFGTX_INNER_CML_DIS_LOOPBACK	0x2
+#define	K_CFGTX_INNER_CML_ENA_LOOPBACK	0x3
+
+/* TX_STS */
+typedef	union _k_esr_ti_tx_sts {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res1		: 14;
+		uint16_t rdtctip	: 1;
+		uint16_t testfail	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t testfail	: 1;
+		uint16_t rdtctip	: 1;
+		uint16_t res1		: 14;
+#endif
+	} bits;
+} k_esr_ti_tx_sts_t;
+
+/* Rx Configuration Low 16-bit word */
+
+typedef	union _k_esr_ti_cfgrx_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t los		: 3;
+		uint16_t align		: 2;
+		uint16_t term		: 3;
+		uint16_t invpair	: 1;
+		uint16_t rate		: 2;
+		uint16_t buswidth	: 4;
+		uint16_t enrx		: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t enrx		: 1;
+		uint16_t buswidth	: 4;
+		uint16_t rate		: 2;
+		uint16_t invpair	: 1;
+		uint16_t term		: 3;
+		uint16_t align		: 2;
+		uint16_t los		: 3;
+#endif
+	} bits;
+} k_esr_ti_cfgrx_l_t;
+
+/* Rx Configuration High 16-bit word */
+
+typedef	union _k_esr_ti_cfgrx_h {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res2		: 1;
+		uint16_t bsinrxn	: 1;
+		uint16_t bsinrxp	: 1;
+		uint16_t loopback	: 2;
+		uint16_t res1		: 3;
+		uint16_t enoc		: 1;
+		uint16_t eq		: 4;
+		uint16_t cdr		: 3;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t cdr		: 3;
+		uint16_t eq		: 4;
+		uint16_t enoc		: 1;
+		uint16_t res1		: 3;
+		uint16_t loopback	: 2;
+		uint16_t bsinrxp	: 1;
+		uint16_t bsinrxn	: 1;
+		uint16_t res2		: 1;
+#endif
+	} bits;
+} k_esr_ti_cfgrx_h_t;
+
+/* Receive Configurations (TBD) */
+#define	K_CFGRX_ENABLE_RX			0x1
+
+#define	K_CFGRX_BUSWIDTH_10BIT			0
+#define	K_CFGRX_BUSWIDTH_8BIT			1
+#define	K_CFGRX_RATE_FULL			0
+#define	K_CFGRX_RATE_HALF			1
+#define	K_CFGRX_RATE_QUAD			2
+#define	K_CFGRX_TERM_VDDT			0
+#define	K_CFGRX_TERM_0P8VDDT			1
+#define	K_CFGRX_TERM_FLOAT			3
+#define	K_CFGRX_ALIGN_DIS			0x0
+#define	K_CFGRX_ALIGN_EN			0x1
+#define	K_CFGRX_ALIGN_JOG			0x2
+#define	K_CFGRX_LOS_DIS				0x0
+#define	K_CFGRX_LOS_ENABLE			0x2
+#define	K_CFGRX_CDR_1ST_ORDER			0
+#define	K_CFGRX_CDR_2ND_ORDER_HP		1
+#define	K_CFGRX_CDR_2ND_ORDER_MP		2
+#define	K_CFGRX_CDR_2ND_ORDER_LP		3
+#define	K_CFGRX_CDR_1ST_ORDER_FAST_LOCK		4
+#define	K_CFGRX_CDR_2ND_ORDER_HP_FAST_LOCK	5
+#define	K_CFGRX_CDR_2ND_ORDER_MP_FAST_LOCK	6
+#define	K_CFGRX_CDR_2ND_ORDER_LP_FAST_LOCK	7
+#define	K_CFGRX_EQ_MAX_LF_ZF			0
+#define	K_CFGRX_EQ_ADAPTIVE			0x1
+#define	K_CFGRX_EQ_ADAPTIVE_LF_365MHZ_ZF	0x8
+#define	K_CFGRX_EQ_ADAPTIVE_LF_275MHZ_ZF	0x9
+#define	K_CFGRX_EQ_ADAPTIVE_LP_195MHZ_ZF	0xa
+#define	K_CFGRX_EQ_ADAPTIVE_LP_140MHZ_ZF	0xb
+#define	K_CFGRX_EQ_ADAPTIVE_LP_105MHZ_ZF	0xc
+#define	K_CFGRX_EQ_ADAPTIVE_LP_75MHZ_ZF		0xd
+#define	K_CFGRX_EQ_ADAPTIVE_LP_55MHZ_ZF		0xe
+#define	K_CFGRX_EQ_ADAPTIVE_LP_50HZ_ZF		0xf
+
+/* Rx Status Low 16-bit word */
+
+typedef	union _k_esr_ti_stsrx_l {
+	uint16_t value;
+
+	struct {
+#if defined(_BIT_FIELDS_HTOL)
+		uint16_t res2		: 10;
+		uint16_t bsrxn		: 1;
+		uint16_t bsrxp		: 1;
+		uint16_t losdtct	: 1;
+		uint16_t res1		: 1;
+		uint16_t sync		: 1;
+		uint16_t testfail	: 1;
+#elif defined(_BIT_FIELDS_LTOH)
+		uint16_t testfail	: 1;
+		uint16_t sync		: 1;
+		uint16_t res1		: 1;
+		uint16_t losdtct	: 1;
+		uint16_t bsrxp		: 1;
+		uint16_t bsrxn		: 1;
+		uint16_t res2		: 10;
+#endif
+	} bits;
+} k_esr_ti_stsrx_l_t;
+
+#define	K_TESTCFG_INNER_CML_EN_LOOOPBACK	0x3
+
 #ifdef	__cplusplus
 }
 #endif
--- a/usr/src/uts/common/sys/nxge/nxge_rxdma_hw.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/common/sys/nxge/nxge_rxdma_hw.h	Fri Dec 11 10:41:17 2009 -0800
@@ -403,6 +403,7 @@
 #define	RXDMA_CFIG2_MBADDR_L_SHIFT	6			/* bit 31:6 */
 #define	RXDMA_CFIG2_MBADDR_L_MASK	0x00000000ffffffc0ULL
 
+/* NOTE: offset256 valid only for Neptune-L and RF-NIU */
 typedef union _rxdma_cfig2_t {
 	uint64_t value;
 	struct {
@@ -412,14 +413,16 @@
 		struct {
 #if defined(_BIT_FIELDS_HTOL)
 			uint32_t mbaddr:26;
-			uint32_t res2:3;
+			uint32_t res2:2;
+			uint32_t offset256:1;
 			uint32_t offset:2;
 			uint32_t full_hdr:1;
 
 #elif defined(_BIT_FIELDS_LTOH)
 			uint32_t full_hdr:1;
 			uint32_t offset:2;
-			uint32_t res2:3;
+			uint32_t offset256:1;
+			uint32_t res2:2;
 			uint32_t mbaddr:26;
 #endif
 		} ldw;
--- a/usr/src/uts/sparc/os/driver_aliases	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sparc/os/driver_aliases	Fri Dec 11 10:41:17 2009 -0800
@@ -166,7 +166,9 @@
 ncp "SUNW,sun4v-ncp"
 ncp "SUNW,n2-mau"
 ncp "SUNW,vf-mau"
+ncp "SUNW,kt-mau"
 n2rng "SUNW,n2-rng"
 n2rng "SUNW,vf-rng"
+n2rng "SUNW,kt-rng"
 nulldriver "scsa,probe"
 nulldriver "scsa,nodev"
--- a/usr/src/uts/sun4/io/trapstat.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4/io/trapstat.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -266,23 +266,24 @@
  * be added via a hybrid scheme, where the same 4M virtual address is used
  * on different MMUs.
  *
- * On sun4v architecture, we currently don't use hybrid scheme as it imposes
- * additional restriction on live migration and transparent CPU replacement.
- * Instead, we increase the number of supported CPUs by reducing the virtual
- * address space requirements per CPU via shared interposing trap table as
- * follows:
+ * On sun4v architecture, we cannot use the hybrid scheme as the architecture
+ * imposes additional restriction on the number of permanent mappings per
+ * guest and it is illegal to use the same virtual address to map different
+ * TTEs on different MMUs. Instead, we increase the number of supported CPUs
+ * by reducing the virtual address space requirements per CPU via shared
+ * interposing trap table as follows:
  *
  *                                          Offset (within 4MB page)
  *       +------------------------------------+- 0x400000
- *       |  CPU 507 trap statistics (8KB)     |   .
- *       |- - - - - - - - - - - - - - - - - - +- 0x3fe000
+ *       |  CPU 1015 trap statistics (4KB)    |   .
+ *       |- - - - - - - - - - - - - - - - - - +- 0x3ff000
  *       |                                    |
  *       |   ...                              |
  *       |                                    |
- *       |- - - - - - - - - - - - - - - - - - +- 0x00c000
- *       |  CPU 1 trap statistics (8KB)       |   .
  *       |- - - - - - - - - - - - - - - - - - +- 0x00a000
- *       |  CPU 0 trap statistics (8KB)       |   .
+ *       |  CPU 1 trap statistics (4KB)       |   .
+ *       |- - - - - - - - - - - - - - - - - - +- 0x009000
+ *       |  CPU 0 trap statistics (4KB)       |   .
  *       |- - - - - - - - - - - - - - - - - - +- 0x008000
  *       |  Shared trap handler continuation  |   .
  *       |- - - - - - - - - - - - - - - - - - +- 0x006000
@@ -293,13 +294,19 @@
  *       |  Non-trap instruction, TL=0        |   .
  *       +------------------------------------+- 0x000000
  *
- * Note that each CPU has its own 8K space for its trap statistics but
+ * Note that each CPU has its own 4K space for its trap statistics but
  * shares the same interposing trap handlers.  Interposing trap handlers
  * use the CPU ID to determine the location of per CPU trap statistics
  * area dynamically. This increases the interposing trap handler overhead,
- * but is acceptable as it allows us to support up to 508 CPUs with one
+ * but is acceptable as it allows us to support up to 1016 CPUs with one
  * 4MB page on sun4v architecture. Support for additional CPUs can be
- * added via hybrid scheme as mentioned earlier.
+ * added with another 4MB page to 2040 CPUs (or 3064 CPUs with 2 additional
+ * 4MB pages). With additional 4MB pages, we cannot use displacement branch
+ * (ba instruction) and we have to use jmp instruction instead. Note that
+ * with sun4v, globals are nested (not per-trap type as in sun4u), so it is
+ * ok to use additional global reg to do jmp. This option is not available in
+ * sun4u, which mandates the usage of displacement branches since no global reg
+ * is available at TL>1.
  *
  * TLB Statistics
  *
@@ -546,10 +553,13 @@
 
 #else /* sun4v */
 
-static caddr_t		tstat_va;	/* VA of memory reserved for TBA */
-static pfn_t		tstat_pfn;	/* PFN of memory reserved for TBA */
+static caddr_t		tstat_va[TSTAT_NUM4M_LIMIT]; /* VAs of 4MB pages */
+static pfn_t		tstat_pfn[TSTAT_NUM4M_LIMIT]; /* PFNs of 4MB pages */
 static boolean_t	tstat_fast_tlbstat = B_FALSE;
 static int		tstat_traptab_initialized;
+static int		tstat_perm_mapping_failed;
+static int		tstat_hv_nopanic;
+static int		tstat_num4m_mapping;
 
 #endif /* sun4v */
 
@@ -597,9 +607,8 @@
 static void
 trapstat_load_tlb(void)
 {
-#ifndef sun4v
 	int i;
-#else
+#ifdef sun4v
 	uint64_t ret;
 #endif
 	tte_t tte;
@@ -625,16 +634,43 @@
 		}
 	}
 #else /* sun4v */
-	tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(tstat_pfn);
-	tte.tte_intlo = TTE_PFN_INTLO(tstat_pfn) | TTE_CP_INT |
-	    TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
-	    TTE_SZ_INTLO(TTE4M);
-	ret = hv_mmu_map_perm_addr(va, KCONTEXT, *(uint64_t *)&tte,
-	    MAP_ITLB | MAP_DTLB);
+	for (i = 0; i < tstat_num4m_mapping; i++) {
+		tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(tstat_pfn[i]);
+		tte.tte_intlo = TTE_PFN_INTLO(tstat_pfn[i]) | TTE_CP_INT |
+		    TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
+		    TTE_SZ_INTLO(TTE4M);
+		ret = hv_mmu_map_perm_addr(va, KCONTEXT, *(uint64_t *)&tte,
+		    MAP_ITLB | MAP_DTLB);
 
-	if (ret != H_EOK)
-		cmn_err(CE_PANIC, "trapstat: cannot map new TBA "
-		    "for cpu %d  (error: 0x%lx)", CPU->cpu_id, ret);
+		if (ret != H_EOK) {
+			if (tstat_hv_nopanic) {
+				int j;
+				/*
+				 * The first attempt to create perm mapping
+				 * failed. The guest might have exhausted its
+				 * perm mapping limit. We don't panic on first
+				 * try.
+				 */
+				tstat_perm_mapping_failed = 1;
+				va = tcpu->tcpu_vabase;
+				for (j = 0; j < i; j++) {
+					(void) hv_mmu_unmap_perm_addr(va,
+					    KCONTEXT, MAP_ITLB | MAP_DTLB);
+					va += MMU_PAGESIZE4M;
+				}
+				break;
+			}
+			/*
+			 * We failed on subsequent cpus trying to
+			 * create the same perm mappings. This
+			 * should not happen. Panic here.
+			 */
+			cmn_err(CE_PANIC, "trapstat: cannot create "
+			    "perm mappings for cpu %d "
+			    "(error: 0x%lx)", CPU->cpu_id, ret);
+		}
+		va += MMU_PAGESIZE4M;
+	}
 #endif /* sun4v */
 }
 
@@ -765,7 +801,11 @@
 	 * interposing trap table.  We can safely use tstat_buffer because
 	 * the caller of the trapstat_probe() cross call is holding tstat_lock.
 	 */
+#ifdef sun4v
+	bcopy(tcpu->tcpu_data, tstat_buffer, TSTAT_DATA_SIZE);
+#else
 	bcopy(tcpu->tcpu_data, tstat_buffer, tstat_data_t_size);
+#endif
 
 	tstat_probe_time = gethrtime();
 
@@ -777,8 +817,13 @@
 
 	tstat_probe_time = gethrtime() - tstat_probe_time;
 
+#ifdef sun4v
+	bcopy(tstat_buffer, tcpu->tcpu_data, TSTAT_DATA_SIZE);
+	tcpu->tcpu_tdata_peffect = (after - before) / TSTAT_PROBE_NPAGES;
+#else
 	bcopy(tstat_buffer, tcpu->tcpu_data, tstat_data_t_size);
 	tcpu->tcpu_data->tdata_peffect = (after - before) / TSTAT_PROBE_NPAGES;
+#endif
 }
 
 static void
@@ -970,18 +1015,47 @@
 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ENABLED);
 
+#ifndef sun4v
 	data->tdata_snapts = gethrtime();
 	data->tdata_snaptick = rdtick();
 	bcopy(data, tstat_buffer, tstat_data_t_size);
-#ifdef sun4v
+#else
 	/*
-	 * Invoke processor specific interface to collect TSB hit
-	 * statistics on each processor.
+	 * For sun4v, in order to conserve space in the limited
+	 * per-cpu 4K buffer, we derive certain info somewhere else and
+	 * copy them directly into the tstat_buffer output.
+	 * Note that we either are collecting tlb stats or
+	 * regular trapstats but never both.
 	 */
-	if ((tstat_options & TSTAT_OPT_TLBDATA) && tstat_fast_tlbstat)
-		cpu_trapstat_data((void *) tstat_buffer->tdata_pgsz,
-		    tstat_pgszs);
-#endif
+	tstat_buffer->tdata_cpuid = CPU->cpu_id;
+	tstat_buffer->tdata_peffect = tcpu->tcpu_tdata_peffect;
+	tstat_buffer->tdata_snapts = gethrtime();
+	tstat_buffer->tdata_snaptick = rdtick();
+
+	if (tstat_options & TSTAT_OPT_TLBDATA) {
+		/* Copy tlb/tsb stats collected in the per-cpu trapdata */
+		tstat_tdata_t *tdata = (tstat_tdata_t *)data;
+		bcopy(&tdata->tdata_pgsz[0],
+		    &tstat_buffer->tdata_pgsz[0],
+		    tstat_pgszs * sizeof (tstat_pgszdata_t));
+
+		/*
+		 * Invoke processor specific interface to collect TLB stats
+		 * on each processor if enabled.
+		 */
+		if (tstat_fast_tlbstat) {
+			cpu_trapstat_data((void *) tstat_buffer->tdata_pgsz,
+			    tstat_pgszs);
+		}
+	} else {
+		/*
+		 * Normal trapstat collection.
+		 * Copy all the 4K data area into tstat_buffer tdata_trap
+		 * area.
+		 */
+		bcopy(data, &tstat_buffer->tdata_traps[0], TSTAT_DATA_SIZE);
+	}
+#endif /* sun4v */
 }
 
 /*
@@ -1023,7 +1097,7 @@
 #ifndef sun4v
 	uintptr_t tmptick = TSTAT_DATA_OFFS(tcpu, tdata_tmptick);
 #else
-	uintptr_t tmptick = TSTAT_CPU0_DATA_OFFS(tcpu, tdata_tmptick);
+	uintptr_t tmptick = TSTAT_CPU0_TLBDATA_OFFS(tcpu, tdata_tmptick);
 #endif
 
 	/*
@@ -1162,15 +1236,17 @@
 #define	TSTAT_TLBENT_TPCLO_KERN	28
 #define	TSTAT_TLBENT_TSHI	32
 #define	TSTAT_TLBENT_TSLO	35
-#define	TSTAT_TLBENT_BA		36
+#define	TSTAT_TLBENT_ADDRHI	36
+#define	TSTAT_TLBENT_ADDRLO	37
 #endif /* sun4v */
 
 static void
 trapstat_tlbent(tstat_percpu_t *tcpu, int entno)
 {
 	uint32_t *ent;
-	uintptr_t orig, va, baoffs;
+	uintptr_t orig, va;
 #ifndef sun4v
+	uintptr_t baoffs;
 	int itlb = entno == TSTAT_ENT_ITLBMISS;
 	uint32_t asi = itlb ? ASI(ASI_IMMU) : ASI(ASI_DMMU);
 #else
@@ -1185,10 +1261,16 @@
 #endif
 	int entoffs = entno << TSTAT_ENT_SHIFT;
 	uintptr_t tmptick, stat, tpc, utpc;
-	tstat_pgszdata_t *data = &tcpu->tcpu_data->tdata_pgsz[0];
+	tstat_pgszdata_t *data;
 	tstat_tlbdata_t *udata, *kdata;
 	tstat_tlbret_t *ret;
 
+#ifdef sun4v
+	data = &((tstat_tdata_t *)tcpu->tcpu_data)->tdata_pgsz[0];
+#else
+	data = &tcpu->tcpu_data->tdata_pgsz[0];
+#endif /* sun4v */
+
 	/*
 	 * When trapstat is run with TLB statistics, this is the entry for
 	 * both I- and D-TLB misses; this code performs trap level pushing,
@@ -1276,7 +1358,10 @@
 	    0x82004004,			/* add %g1, %g4, %g1		*/
 	    0x85410000,			/* rd    %tick, %g2		*/
 	    0xc4706000,			/* stx   %g2, [%g1 + %lo(tmptick)] */
-	    0x30800000			/* ba,a  addr			*/
+	    0x05000000,			/* sethi %hi(addr), %g2		*/
+	    0x8410a000,			/* or %g2, %lo(addr), %g2	*/
+	    0x81c08000,			/* jmp %g2			*/
+	    NOP,
 #endif /* sun4v */
 	};
 
@@ -1290,8 +1375,8 @@
 	ASSERT(entno == TSTAT_ENT_ITLBMISS || entno == TSTAT_ENT_DTLBMISS ||
 	    entno == TSTAT_ENT_IMMUMISS || entno == TSTAT_ENT_DMMUMISS);
 
-	stat = TSTAT_CPU0_DATA_OFFS(tcpu, tdata_traps) + entoffs;
-	tmptick = TSTAT_CPU0_DATA_OFFS(tcpu, tdata_tmptick);
+	stat = TSTAT_CPU0_TLBDATA_OFFS(tcpu, tdata_traps[entno]);
+	tmptick = TSTAT_CPU0_TLBDATA_OFFS(tcpu, tdata_tmptick);
 #endif /* sun4v */
 
 	if (itlb) {
@@ -1314,7 +1399,6 @@
 	ent = (uint32_t *)((uintptr_t)tcpu->tcpu_instr + entoffs);
 	orig = KERNELBASE + entoffs;
 	va = (uintptr_t)tcpu->tcpu_ibase + entoffs;
-	baoffs = TSTAT_TLBENT_BA * sizeof (uint32_t);
 
 #ifdef sun4v
 	/*
@@ -1358,7 +1442,13 @@
 	ent[TSTAT_TLBENT_TPCLO_KERN] |= LO10(tpc);
 	ent[TSTAT_TLBENT_TSHI] |= HI22(tmptick);
 	ent[TSTAT_TLBENT_TSLO] |= LO10(tmptick);
+#ifndef	sun4v
+	baoffs = TSTAT_TLBENT_BA * sizeof (uint32_t);
 	ent[TSTAT_TLBENT_BA] |= DISP22(va + baoffs, orig);
+#else
+	ent[TSTAT_TLBENT_ADDRHI] |= HI22(orig);
+	ent[TSTAT_TLBENT_ADDRLO] |= LO10(orig);
+#endif /* sun4v */
 
 	/*
 	 * And now set up the TLB return entries.
@@ -1478,14 +1568,15 @@
 #define	TSTAT_ENABLED_ADDRLO	3
 #define	TSTAT_ENABLED_CONTBA	6
 #define	TSTAT_ENABLED_TDATASHFT	7
-#define	TSTAT_DISABLED_BA	0
+#define	TSTAT_DISABLED_ADDRHI	0
+#define	TSTAT_DISABLED_ADDRLO	1
 
 static void
 trapstat_make_traptab(tstat_percpu_t *tcpu)
 {
 	uint32_t *ent;
 	uint64_t *stat;
-	uintptr_t orig, va, en_baoffs, dis_baoffs;
+	uintptr_t orig, va, en_baoffs;
 	uintptr_t tstat_cont_va;
 	int nent;
 
@@ -1546,15 +1637,17 @@
 
 	/*
 	 * This is the entry in the interposing trap table for disabled trap
-	 * table entries.  It simply branches to the actual, underlying trap
+	 * table entries.  It simply "jmp" to the actual, underlying trap
 	 * table entry.  As explained in the "Implementation Details" section
 	 * of the block comment, all TL>0 traps _must_ use the disabled entry;
 	 * additional entries may be explicitly disabled through the use
 	 * of TSTATIOC_ENTRY/TSTATIOC_NOENTRY.
 	 */
 	static const uint32_t disabled[TSTAT_ENT_NINSTR] = {
-	    0x30800000,			/* ba,a addr			*/
-	    NOP, NOP, NOP, NOP, NOP, NOP, NOP,
+	    0x05000000,			/* sethi %hi(addr), %g2		*/
+	    0x8410a000,			/* or %g2, %lo(addr), %g2	*/
+	    0x81c08000,			/* jmp %g2			*/
+	    NOP, NOP, NOP, NOP, NOP,
 	};
 
 	ASSERT(MUTEX_HELD(&tstat_lock));
@@ -1563,11 +1656,18 @@
 	orig = KERNELBASE;
 	va = (uintptr_t)tcpu->tcpu_ibase;
 	en_baoffs = TSTAT_ENABLED_CONTBA * sizeof (uint32_t);
-	dis_baoffs = TSTAT_DISABLED_BA * sizeof (uint32_t);
 	tstat_cont_va = TSTAT_INSTR_OFFS(tcpu, tinst_trapcnt);
 
 	for (nent = 0; nent < TSTAT_TOTAL_NENT; nent++) {
-		if (tstat_enabled[nent]) {
+		/*
+		 * If TSTAT_OPT_TLBDATA option is enabled (-t or -T option)
+		 * we make sure only TSTAT_TLB_NENT traps can be enabled.
+		 * Note that this logic is somewhat moot since the trapstat
+		 * cmd actually uses the TSTATIOC_NOENTRY ioctl to disable all
+		 * traps when performing TLB stats collection.
+		 */
+		if ((!(tstat_options & TSTAT_OPT_TLBDATA) ||
+		    nent < TSTAT_TLB_NENT) && tstat_enabled[nent]) {
 			bcopy(enabled, ent, sizeof (enabled));
 			ent[TSTAT_ENABLED_STATHI] |= HI22((uintptr_t)stat);
 			ent[TSTAT_ENABLED_STATLO] |= LO10((uintptr_t)stat);
@@ -1579,7 +1679,8 @@
 			    LO10((uintptr_t)TSTAT_DATA_SHIFT);
 		} else {
 			bcopy(disabled, ent, sizeof (disabled));
-			ent[TSTAT_DISABLED_BA] |= DISP22(va + dis_baoffs, orig);
+			ent[TSTAT_DISABLED_ADDRHI] |= HI22((uintptr_t)orig);
+			ent[TSTAT_DISABLED_ADDRLO] |= LO10((uintptr_t)orig);
 		}
 
 		stat++;
@@ -1620,6 +1721,8 @@
 	cpu_t *cp;
 	uint_t strand_idx;
 	size_t tstat_offset;
+#else
+	uint64_t offset;
 #endif
 
 	ASSERT(tcpu->tcpu_pfn == NULL);
@@ -1709,17 +1812,19 @@
 	 * The lower fifteen bits of the %tba are always read as zero; hence
 	 * it must be aligned at least on 512K boundary.
 	 */
-	tcpu->tcpu_vabase = (caddr_t)(KERNELBASE - MMU_PAGESIZE4M);
+	tcpu->tcpu_vabase = (caddr_t)(KERNELBASE -
+	    MMU_PAGESIZE4M * tstat_num4m_mapping);
 	tcpu->tcpu_ibase = tcpu->tcpu_vabase;
 	tcpu->tcpu_dbase = tcpu->tcpu_ibase + TSTAT_INSTR_SIZE +
 	    cpu * TSTAT_DATA_SIZE;
 
-	tcpu->tcpu_pfn = &tstat_pfn;
-	tcpu->tcpu_instr = (tstat_instr_t *)tstat_va;
-	tcpu->tcpu_data = (tstat_data_t *)(tstat_va + TSTAT_INSTR_SIZE +
-	    cpu * TSTAT_DATA_SIZE);
+	tcpu->tcpu_pfn = &tstat_pfn[0];
+	tcpu->tcpu_instr = (tstat_instr_t *)tstat_va[0];
+
+	offset = TSTAT_INSTR_SIZE + cpu * TSTAT_DATA_SIZE;
+	tcpu->tcpu_data = (tstat_data_t *)(tstat_va[offset >> MMU_PAGESHIFT4M] +
+	    (offset & MMU_PAGEOFFSET4M));
 	bzero(tcpu->tcpu_data, TSTAT_DATA_SIZE);
-	tcpu->tcpu_data->tdata_cpuid = cpu;
 
 	/*
 	 * Now that we have all of the instruction and data pages allocated,
@@ -1759,9 +1864,7 @@
 trapstat_teardown(processorid_t cpu)
 {
 	tstat_percpu_t *tcpu = &tstat_percpu[cpu];
-#ifndef sun4v
 	int i;
-#endif
 	caddr_t va = tcpu->tcpu_vabase;
 
 	ASSERT(tcpu->tcpu_pfn != NULL);
@@ -1783,7 +1886,10 @@
 		    (uint64_t)ksfmmup);
 	}
 #else
-	xt_one(cpu, vtag_unmap_perm_tl1, (uint64_t)va, KCONTEXT);
+	for (i = 0; i < tstat_num4m_mapping; i++) {
+		xt_one(cpu, vtag_unmap_perm_tl1, (uint64_t)va, KCONTEXT);
+		va += MMU_PAGESIZE4M;
+	}
 #endif
 
 	tcpu->tcpu_pfn = NULL;
@@ -1796,6 +1902,9 @@
 trapstat_go()
 {
 	cpu_t *cp;
+#ifdef sun4v
+	int i;
+#endif /* sun4v */
 
 	mutex_enter(&cpu_lock);
 	mutex_enter(&tstat_lock);
@@ -1808,16 +1917,33 @@
 
 #ifdef sun4v
 	/*
-	 * Allocate large page to hold interposing tables.
+	 * Compute the actual number of 4MB mappings
+	 * we need based on the guest's ncpu_guest_max value.
+	 * Note that earlier, at compile time, we did establish
+	 * and check against the sun4v solaris arch limit
+	 * (TSTAT_NUM4M_LIMIT) which is based on NCPU.
+	 */
+	tstat_num4m_mapping = TSTAT_NUM4M_MACRO(ncpu_guest_max);
+	ASSERT(tstat_num4m_mapping <= TSTAT_NUM4M_LIMIT);
+
+	/*
+	 * Allocate large pages to hold interposing tables.
 	 */
-	tstat_va = contig_mem_alloc(MMU_PAGESIZE4M);
-	tstat_pfn = va_to_pfn(tstat_va);
-	if (tstat_pfn == PFN_INVALID) {
-		mutex_exit(&tstat_lock);
-		mutex_exit(&cpu_lock);
-		return (EAGAIN);
+	for (i = 0; i < tstat_num4m_mapping; i++) {
+		tstat_va[i] = contig_mem_alloc(MMU_PAGESIZE4M);
+		tstat_pfn[i] = va_to_pfn(tstat_va[i]);
+		if (tstat_pfn[i] == PFN_INVALID) {
+			int j;
+			for (j = 0; j < i; j++) {
+				contig_mem_free(tstat_va[j], MMU_PAGESIZE4M);
+			}
+			mutex_exit(&tstat_lock);
+			mutex_exit(&cpu_lock);
+			return (EAGAIN);
+		}
 	}
 
+
 	/*
 	 * For detailed TLB statistics, invoke CPU specific interface
 	 * to see if it supports a low overhead interface to collect
@@ -1832,12 +1958,17 @@
 		if (error == 0)
 			tstat_fast_tlbstat = B_TRUE;
 		else if (error != ENOTSUP) {
-			contig_mem_free(tstat_va, MMU_PAGESIZE4M);
+			for (i = 0; i < tstat_num4m_mapping; i++) {
+				contig_mem_free(tstat_va[i], MMU_PAGESIZE4M);
+			}
 			mutex_exit(&tstat_lock);
 			mutex_exit(&cpu_lock);
 			return (error);
 		}
 	}
+
+	tstat_hv_nopanic = 1;
+	tstat_perm_mapping_failed = 0;
 #endif /* sun4v */
 
 	/*
@@ -1850,7 +1981,6 @@
 	 */
 	trapstat_probe_alloc();
 
-
 	cp = cpu_list;
 	do {
 		if (!(tstat_percpu[cp->cpu_id].tcpu_flags & TSTAT_CPU_SELECTED))
@@ -1864,6 +1994,41 @@
 		 * of in parallel with an xc_all().
 		 */
 		xc_one(cp->cpu_id, (xcfunc_t *)trapstat_probe, 0, 0);
+
+#ifdef sun4v
+		/*
+		 * Check to see if the first cpu's attempt to create
+		 * the perm mappings failed. This might happen if the
+		 * guest somehow exhausted all its limited perm mappings.
+		 * Note that we only check this once for the first
+		 * attempt since it shouldn't fail for subsequent cpus
+		 * mapping the same TTEs if the first attempt was successful.
+		 */
+		if (tstat_hv_nopanic && tstat_perm_mapping_failed) {
+			tstat_percpu_t *tcpu = &tstat_percpu[cp->cpu_id];
+			for (i = 0; i < tstat_num4m_mapping; i++) {
+				contig_mem_free(tstat_va[i], MMU_PAGESIZE4M);
+			}
+
+			/*
+			 * Do clean up before returning.
+			 * Cleanup is manageable since we
+			 * only need to do it for the first cpu
+			 * iteration that failed.
+			 */
+			trapstat_probe_free();
+			trapstat_hotpatch();
+			tcpu->tcpu_pfn = NULL;
+			tcpu->tcpu_instr = NULL;
+			tcpu->tcpu_data = NULL;
+			tcpu->tcpu_flags &= ~TSTAT_CPU_ALLOCATED;
+			mutex_exit(&tstat_lock);
+			mutex_exit(&cpu_lock);
+			return (EAGAIN);
+		}
+		tstat_hv_nopanic = 0;
+#endif /* sun4v */
+
 	} while ((cp = cp->cpu_next) != cpu_list);
 
 	xc_all((xcfunc_t *)trapstat_enable, 0, 0);
@@ -1900,7 +2065,8 @@
 	tstat_traptab_initialized = 0;
 	if (tstat_options & TSTAT_OPT_TLBDATA)
 		cpu_trapstat_conf(CPU_TSTATCONF_FINI);
-	contig_mem_free(tstat_va, MMU_PAGESIZE4M);
+	for (i = 0; i < tstat_num4m_mapping; i++)
+		contig_mem_free(tstat_va[i], MMU_PAGESIZE4M);
 #endif
 	trapstat_hotpatch();
 	tstat_running = 0;
@@ -2016,7 +2182,11 @@
 		 * Preserve this CPU's data in tstat_buffer and rip down its
 		 * interposing trap table.
 		 */
+#ifdef sun4v
+		bcopy(tcpu->tcpu_data, tstat_buffer, TSTAT_DATA_SIZE);
+#else
 		bcopy(tcpu->tcpu_data, tstat_buffer, tstat_data_t_size);
+#endif /* sun4v */
 		trapstat_teardown(cp->cpu_id);
 		ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED));
 
@@ -2026,7 +2196,11 @@
 		 */
 		trapstat_setup(cp->cpu_id);
 		ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
+#ifdef sun4v
+		bcopy(tstat_buffer, tcpu->tcpu_data, TSTAT_DATA_SIZE);
+#else
 		bcopy(tstat_buffer, tcpu->tcpu_data, tstat_data_t_size);
+#endif /* sun4v */
 
 		xc_one(cp->cpu_id, (xcfunc_t *)trapstat_enable, 0, 0);
 	} while ((cp = cp->cpu_next) != cpu_list);
@@ -2360,7 +2534,15 @@
 	tstat_data_size = tstat_data_pages * MMU_PAGESIZE;
 	tstat_total_size = TSTAT_INSTR_SIZE + tstat_data_size;
 #else
-	ASSERT(tstat_data_t_size <= TSTAT_DATA_SIZE);
+	/*
+	 * For sun4v, the tstat_data_t_size reflect the tstat_buffer
+	 * output size based on tstat_data_t structure. For tlbstats
+	 * collection, we use the internal tstat_tdata_t structure
+	 * to collect the tlbstats for the pages. Therefore we
+	 * need to adjust the size for the assertion.
+	 */
+	ASSERT((tstat_data_t_size - sizeof (tstat_data_t) +
+	    sizeof (tstat_tdata_t)) <= TSTAT_DATA_SIZE);
 #endif
 
 	tstat_percpu = kmem_zalloc((max_cpuid + 1) *
@@ -2461,7 +2643,7 @@
 
 static struct modldrv modldrv = {
 	&mod_driverops,		/* Type of module.  This one is a driver */
-	"Trap Statistics",	/* name of module */
+	"Trap Statistics 1.1",	/* name of module */
 	&trapstat_ops,		/* driver ops */
 };
 
--- a/usr/src/uts/sun4/sys/trapstat.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4/sys/trapstat.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _SYS_TRAPSTAT_H
 #define	_SYS_TRAPSTAT_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifndef _ASM
 #include <sys/processor.h>
 #endif
@@ -78,6 +76,29 @@
 	tstat_modedata_t	tpgsz_kernel;
 } tstat_pgszdata_t;
 
+#ifdef sun4v
+/*
+ * For sun4v, we optimized by using a smaller 4K data area
+ * per-cpu. We use separate structures for data collection,
+ * one for normal trapstat collection and one for collecting
+ * TLB stats. Note that we either collect normal trapstats
+ * or TLB stats, never both. For TLB stats, we are only
+ * interested in the MMU/TLB miss traps (which are trap #s
+ * 0x9, 0x32, 0x64 & 0x68)
+ */
+#define	TSTAT_TLB_NENT	200 /* max trap entries for tlb stats */
+
+typedef struct tstat_ndata {
+	uint64_t	tdata_traps[TSTAT_NENT];
+} tstat_ndata_t;
+
+typedef struct tstat_tdata {
+	uint64_t	tdata_traps[TSTAT_TLB_NENT];
+	hrtime_t	tdata_tmptick;
+	tstat_pgszdata_t tdata_pgsz[1];
+} tstat_tdata_t;
+#endif /* sun4v */
+
 typedef struct tstat_data {
 	processorid_t	tdata_cpuid;
 	hrtime_t	tdata_snapts;
@@ -153,20 +174,52 @@
 
 #ifdef sun4v
 
-#if (NCPU > 508)
-#error "sun4v trapstat supports up to 508 cpus"
-#endif
-
 #define	TSTAT_TLB_STATS		0x1		/* cpu_tstat_flags */
 #define	TSTAT_INSTR_SIZE	\
 	((sizeof (tstat_instr_t) + MMU_PAGESIZE - 1) & ~(MMU_PAGESIZE - 1))
-#define	TSTAT_DATA_SHIFT	13
-#define	TSTAT_DATA_SIZE		(1 << TSTAT_DATA_SHIFT)	/* 8K per CPU */
+#define	TSTAT_DATA_SHIFT	12
+#define	TSTAT_DATA_SIZE		(1 << TSTAT_DATA_SHIFT)	/* 4K per CPU */
 #define	TSTAT_TBA_MASK		~((1 << 15) - 1)	/* 32K boundary */
 
 #define	TSTAT_CPU0_DATA_OFFS(tcpu, mem)	\
 	((uintptr_t)(tcpu)->tcpu_ibase + TSTAT_INSTR_SIZE + \
-	    offsetof(tstat_data_t, mem))
+	    offsetof(tstat_ndata_t, mem))
+
+#define	TSTAT_CPU0_TLBDATA_OFFS(tcpu, mem) \
+	((uintptr_t)(tcpu)->tcpu_ibase + TSTAT_INSTR_SIZE + \
+	    offsetof(tstat_tdata_t, mem))
+
+/*
+ * Sun4v trapstat can use up to 3 4MB pages to support
+ * 3064 cpus. Each cpu needs 4K of data page for stats collection.
+ * The first 32K (TSTAT_TRAPTBL_SIZE) in the first 4 MB page is
+ * used for the trap table, leaving 4MB - 32K = 4064K for cpu data
+ * which works out to be 4064/4K = 1016 cpus. Each additional
+ * 4MB page (2nd and 3rd ones) can support 4096/4 = 1024 cpus.
+ * This works out to be a total of 1016 + 1024 + 1024 = 3064 cpus.
+ */
+#define	ROUNDUP(a, n)	(((a) + ((n) - 1)) & ~((n) - 1))
+#define	TSTAT_MAXNUM4M_MAPPING	3
+#define	TSTAT_TRAPTBL_SIZE	(32 * 1024)
+#define	TSTAT_NUM4M_LIMIT \
+	(ROUNDUP((NCPU * TSTAT_DATA_SIZE) + TSTAT_TRAPTBL_SIZE, \
+	    MMU_PAGESIZE4M) >> MMU_PAGESHIFT4M)
+
+#if (TSTAT_NUM4M_LIMIT > TSTAT_MAXNUM4M_MAPPING)
+#error "NCPU is too large for trapstat"
+#endif
+
+/*
+ * Note that the macro below is almost identical to the
+ * one for TSTAT_NUM4M_LIMIT with one difference. Instead of
+ * using TSTAT_TRAPTBL_SIZE constant, it uses TSTAT_INSTR_SIZE which
+ * has a runtime sizeof() expression. The result should be
+ * the same. This macro is used at runtime as an extra
+ * validation for correctness.
+ */
+#define	TSTAT_NUM4M_MACRO(ncpu) \
+	(ROUNDUP(((ncpu) * TSTAT_DATA_SIZE) + TSTAT_INSTR_SIZE, \
+	    MMU_PAGESIZE4M) >> MMU_PAGESHIFT4M)
 
 #else /* sun4v */
 
@@ -204,6 +257,9 @@
 	pfn_t		*tcpu_pfn;
 	tstat_instr_t	*tcpu_instr;
 	tstat_data_t	*tcpu_data;
+#ifdef sun4v
+	hrtime_t	tcpu_tdata_peffect;
+#endif /* sun4v */
 } tstat_percpu_t;
 
 #endif
--- a/usr/src/uts/sun4v/Makefile.files	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/Makefile.files	Fri Dec 11 10:41:17 2009 -0800
@@ -121,6 +121,7 @@
 FPC_OBJS	+= fpc-impl-4v.o fpc-asm-4v.o
 N2PIUPC_OBJS	+= n2piupc.o n2piupc_tables.o n2piupc_kstats.o \
 			n2piupc_biterr.o n2piupc_asm.o
+IOSPC_OBJS	+= iospc.o rfios_iospc.o rfios_tables.o rfios_asm.o
 TRAPSTAT_OBJS	+= trapstat.o
 NIUMX_OBJS	+= niumx.o
 N2RNG_OBJS	+= n2rng.o n2rng_debug.o n2rng_hcall.o n2rng_kcf.o \
@@ -153,7 +154,7 @@
 VSW_OBJS	= vsw.o vsw_ldc.o vsw_phys.o vsw_switching.o vsw_hio.o
 VDC_OBJS	= vdc.o
 VDS_OBJS	= vds.o
-DS_PRI_OBJS	= ds_pri.o
+DS_PRI_OBJS	= ds_pri.o ds_pri_hcall.o
 DS_SNMP_OBJS	= ds_snmp.o
 VLDS_OBJS	= vlds.o
 
--- a/usr/src/uts/sun4v/Makefile.rules	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/Makefile.rules	Fri Dec 11 10:41:17 2009 -0800
@@ -91,6 +91,13 @@
 $(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/n2piupc/%.s
 	$(COMPILE.s) -o $@ $<
 
+$(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/iospc/%.c
+	$(COMPILE.c) -o $@ $<
+	$(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/iospc/%.s
+	$(COMPILE.s) -o $@ $<
+
 $(OBJS_DIR)/%.o:		$(UTSBASE)/sun4v/io/glvc/%.c
 	$(COMPILE.c) -o $@ $<
 	$(CTFCONVERT_O)
@@ -197,6 +204,12 @@
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/n2piupc/%.s
 	@($(LHEAD) $(LINT.s) $< $(LTAIL))
 
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/iospc/%.c
+	@($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/iospc/%.s
+	@($(LHEAD) $(LINT.s) $< $(LTAIL))
+
 $(LINTS_DIR)/%.ln:		$(UTSBASE)/sun4v/io/glvc/%.c
 	@($(LHEAD) $(LINT.c) $< $(LTAIL))
 
--- a/usr/src/uts/sun4v/Makefile.sun4v.shared	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/Makefile.sun4v.shared	Fri Dec 11 10:41:17 2009 -0800
@@ -336,6 +336,7 @@
 DRV_KMODS	+= ntwdt
 DRV_KMODS	+= nxge
 DRV_KMODS	+= n2piupc
+DRV_KMODS	+= iospc
 DRV_KMODS	+= n2rng
 DRV_KMODS	+= px
 DRV_KMODS	+= qcn
@@ -432,7 +433,7 @@
 #
 #	cpu modules
 #
-CPU_KMODS	+= generic niagara niagara2 vfalls
+CPU_KMODS	+= generic niagara niagara2 vfalls kt
 
 LINT_CPU_KMODS	+= generic
 
@@ -442,3 +443,4 @@
 PCBE_KMODS	+= niagara_pcbe
 PCBE_KMODS	+= niagara2_pcbe
 PCBE_KMODS	+= vfalls_pcbe
+PCBE_KMODS	+= kt_pcbe
--- a/usr/src/uts/sun4v/cpu/niagara2.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/cpu/niagara2.c	Fri Dec 11 10:41:17 2009 -0800
@@ -20,12 +20,10 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <sys/types.h>
 #include <sys/systm.h>
 #include <sys/archsystm.h>
@@ -66,6 +64,8 @@
 char cpu_module_name[] = "SUNW,UltraSPARC-T2";
 #elif defined(VFALLS_IMPL)
 char cpu_module_name[] = "SUNW,UltraSPARC-T2+";
+#elif defined(KT_IMPL)
+char cpu_module_name[] = "SUNW,UltraSPARC-KT";
 #endif
 
 /*
@@ -84,6 +84,11 @@
 	HSVC_REV_1, NULL, HSVC_GROUP_VFALLS_CPU, VFALLS_HSVC_MAJOR,
 	VFALLS_HSVC_MINOR, cpu_module_name
 };
+#elif defined(KT_IMPL)
+static hsvc_info_t cpu_hsvc = {
+	HSVC_REV_1, NULL, HSVC_GROUP_KT_CPU, KT_HSVC_MAJOR,
+	KT_HSVC_MINOR, cpu_module_name
+};
 #endif
 
 void
@@ -113,6 +118,24 @@
 	 */
 	cpu_setup_common(NULL);
 
+	/*
+	 * Initialize the cpu_hwcap_flags for N2 and VF if it is not already
+	 * set in cpu_setup_common() by the hwcap MD info. Note that this MD
+	 * info may not be available for N2/VF.
+	 */
+	if (cpu_hwcap_flags == 0) {
+#ifdef KT_IMPL
+		/*
+		 * This should not happen since hwcap MD info is always
+		 * available for KT platforms.
+		 */
+		ASSERT(cpu_hwcap_flags != 0);	/* panic in DEBUG mode */
+		cpu_hwcap_flags |= AV_SPARC_VIS3 | AV_SPARC_HPC | AV_SPARC_FMAF;
+#endif /* KT_IMPL */
+		cpu_hwcap_flags |= AV_SPARC_VIS | AV_SPARC_VIS2 |
+		    AV_SPARC_ASI_BLK_INIT | AV_SPARC_POPC;
+	}
+
 	cache |= (CACHE_PTAG | CACHE_IOCOHERENT);
 
 	if ((mmu_exported_pagesize_mask &
@@ -123,9 +146,6 @@
 		    " 8K, 64K and 4M: MD mask is 0x%x",
 		    mmu_exported_pagesize_mask);
 
-	cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2 |
-	    AV_SPARC_ASI_BLK_INIT | AV_SPARC_POPC;
-
 	/*
 	 * Niagara2 supports a 48-bit subset of the full 64-bit virtual
 	 * address space. Virtual addresses between 0x0000800000000000
@@ -494,13 +514,11 @@
 
 	/* restart here when we switch memblocks */
 next_mem_block:
-	if (szc <= TTE64K) {
-		pfnmn = PAPFN_2_MNODE(pfn);
-	}
-	if (((page_papfn_2_color_cpu(pfn, szc) ^ color) & ceq_mask) == 0 &&
-	    (szc > TTE64K || pfnmn == it->mi_mnode)) {
+	pfnmn = PAPFN_2_MNODE(pfn);
+	if ((((page_papfn_2_color_cpu(pfn, szc) ^ color) & ceq_mask) == 0) &&
+	    (pfnmn == it->mi_mnode)) {
 
-		/* we start from the page with correct color */
+		/* we start from the page with correct color and mnode */
 		if (szc >= TTE512K) {
 			if (szc >= TTE4M) {
 				/* page color is PA[32:28] */
@@ -510,6 +528,11 @@
 				pfn_ceq_mask = ((ceq_mask & 1) << 6) |
 				    ((ceq_mask >> 1) << 15);
 			}
+			/*
+			 * Preserve mnode bits in case they are not part of the
+			 * color mask (eg., 8GB interleave, mnode bits 34:33).
+			 */
+			pfn_ceq_mask |= it->mi_mnode_pfn_mask;
 			npfn = ADD_MASKED(pfn, pstep, pfn_ceq_mask, mask);
 			goto done;
 		} else {
@@ -554,8 +577,9 @@
 		} else {
 			/* try get the right color by changing bit PA[19:19] */
 			npfn = pfn + pstep;
-			if (((page_papfn_2_color_cpu(npfn, szc) ^ color) &
-			    ceq_mask) == 0)
+			pfnmn = PAPFN_2_MNODE(npfn);
+			if ((((page_papfn_2_color_cpu(npfn, szc) ^ color) &
+			    ceq_mask) == 0) && (pfnmn == it->mi_mnode))
 				goto done;
 
 			/* page color is PA[32:28].PA[19:19] */
@@ -565,6 +589,16 @@
 			npfn = ((pfn >> 20) << 20) | pfn_color;
 		}
 
+		/* Fix mnode if necessary */
+		if ((pfnmn = PAPFN_2_MNODE(npfn)) != it->mi_mnode)
+			npfn += ((it->mi_mnode - pfnmn) & it->mi_mnode_mask) <<
+			    it->mi_mnode_pfn_shift;
+
+		/*
+		 * Preserve mnode bits in case they are not part of the color
+		 * mask (eg., 8GB interleave, mnode bits 34:33).
+		 */
+		pfn_ceq_mask |= it->mi_mnode_pfn_mask;
 		while (npfn <= pfn) {
 			npfn = ADD_MASKED(npfn, pstep, pfn_ceq_mask, mask);
 		}
--- a/usr/src/uts/sun4v/cpu/niagara2_asm.s	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/cpu/niagara2_asm.s	Fri Dec 11 10:41:17 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #if !defined(lint)
 #include "assym.h"
 #endif
@@ -64,6 +62,8 @@
 	mov     HV_NIAGARA2_GETPERF, %o5
 #elif defined(VFALLS_IMPL)
 	mov	HV_VFALLS_GETPERF, %o5
+#elif defined(KT_IMPL)
+	mov	HV_KT_GETPERF, %o5
 #endif
 	ta      FAST_TRAP
 	brz,a   %o0, 1f
@@ -81,6 +81,8 @@
 	mov     HV_NIAGARA2_SETPERF, %o5
 #elif defined(VFALLS_IMPL)
 	mov     HV_VFALLS_SETPERF, %o5
+#elif defined(KT_IMPL)
+	mov     HV_KT_SETPERF, %o5
 #endif
 	ta      FAST_TRAP
 	retl
--- a/usr/src/uts/sun4v/cpu/niagara_perfctr.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/cpu/niagara_perfctr.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -33,7 +33,7 @@
 #include <sys/kstat.h>
 #if defined(NIAGARA_IMPL)
 #include <sys/niagararegs.h>
-#elif defined(NIAGARA2_IMPL) || defined(VFALLS_IMPL)
+#elif defined(NIAGARA2_IMPL) || defined(VFALLS_IMPL) || defined(KT_IMPL)
 #include <sys/niagara2regs.h>
 #endif
 
@@ -58,6 +58,7 @@
  * counters.
  */
 #define	NUM_OF_PICS	2
+#define	NUM_OF_PIC_REGS	1
 
 typedef struct ni_ksinfo {
 	uint8_t		pic_no_evs;			/* number of events */
@@ -66,13 +67,13 @@
 	uint64_t	pic_mask[NUM_OF_PICS];
 	kstat_t		*pic_name_ksp[NUM_OF_PICS];
 	kstat_t		*cntr_ksp;
-	uint32_t	pic_reg[NUM_OF_PICS];
+	uint32_t	pic_reg[NUM_OF_PIC_REGS];
 	uint32_t	pcr_reg;
 	uint32_t	pic_overflow[NUM_OF_PICS];	/* overflow count */
 	uint32_t	pic_last_val[NUM_OF_PICS];	/* last PIC value */
 } ni_ksinfo_t;
 
-static ni_ksinfo_t	*ni_dram_kstats[NIAGARA_DRAM_BANKS];
+static ni_ksinfo_t	*ni_dram_kstats[DRAM_BANKS];
 
 #if defined(NIAGARA_IMPL)
 static ni_ksinfo_t	*ni_jbus_kstat;
@@ -80,19 +81,24 @@
 
 typedef struct ni_perf_regs {
 	uint32_t	pcr_reg;
-	uint32_t	pic_reg;
+	uint32_t	pic_reg[NUM_OF_PIC_REGS];
 } ni_perf_regs_t;
 
 static ni_perf_regs_t dram_perf_regs[] = {
-	{HV_NIAGARA_DRAM_CTL0, HV_NIAGARA_DRAM_COUNT0},
-	{HV_NIAGARA_DRAM_CTL1, HV_NIAGARA_DRAM_COUNT1},
-	{HV_NIAGARA_DRAM_CTL2, HV_NIAGARA_DRAM_COUNT2},
-	{HV_NIAGARA_DRAM_CTL3, HV_NIAGARA_DRAM_COUNT3},
-#ifdef VFALLS_IMPL
-	{HV_NIAGARA_DRAM_CTL4, HV_NIAGARA_DRAM_COUNT4},
-	{HV_NIAGARA_DRAM_CTL5, HV_NIAGARA_DRAM_COUNT5},
-	{HV_NIAGARA_DRAM_CTL6, HV_NIAGARA_DRAM_COUNT6},
-	{HV_NIAGARA_DRAM_CTL7, HV_NIAGARA_DRAM_COUNT7}
+#if defined(NIAGARA_IMPL) || defined(NIAGARA2_IMPL)
+	{HV_DRAM_CTL0, HV_DRAM_COUNT0},
+	{HV_DRAM_CTL1, HV_DRAM_COUNT1},
+	{HV_DRAM_CTL2, HV_DRAM_COUNT2},
+	{HV_DRAM_CTL3, HV_DRAM_COUNT3},
+#elif defined(VFALLS_IMPL) || defined(KT_IMPL)
+	{HV_DRAM_CTL0, HV_DRAM_COUNT0},
+	{HV_DRAM_CTL1, HV_DRAM_COUNT1},
+	{HV_DRAM_CTL2, HV_DRAM_COUNT2},
+	{HV_DRAM_CTL3, HV_DRAM_COUNT3},
+	{HV_DRAM_CTL4, HV_DRAM_COUNT4},
+	{HV_DRAM_CTL5, HV_DRAM_COUNT5},
+	{HV_DRAM_CTL6, HV_DRAM_COUNT6},
+	{HV_DRAM_CTL7, HV_DRAM_COUNT7}
 #endif
 };
 
@@ -169,16 +175,31 @@
  */
 static ni_kev_mask_t
 niagara_dram_events[] = {
+#if defined(NIAGARA_IMPL) || defined(NIAGARA2_IMPL) || defined(VFALLS_IMPL)
 	{"mem_reads",		0x0},
 	{"mem_writes",		0x1},
 	{"mem_read_write",	0x2},
-#if defined(NIAGARA_IMPL)
+#elif defined(KT_IMPL)
+	{"remote_reads",	0x0},
+	{"remote_writes",	0x1},
+	{"remote_read_write",	0x2},
+#endif
+#if defined(NIAGARA_IMPL) || defined(KT_IMPL)
 	{"bank_busy_stalls",	0x3},
 #endif
 	{"rd_queue_latency",	0x4},
 	{"wr_queue_latency",	0x5},
 	{"rw_queue_latency",	0x6},
+#if defined(NIAGARA_IMPL) || defined(NIAGARA2_IMPL) || defined(VFALLS_IMPL)
 	{"wb_buf_hits",		0x7},
+#elif defined(KT_IMPL)
+	{"write_queue_drain",	0x7},
+	{"read_all_channels",	0x8},
+	{"write_starved",	0x9},
+	{"write_all_channels",	0xa},
+	{"read_write_channel0",	0xb},
+	{"read_write_channel1",	0xc},
+#endif
 	{"clear_pic",		0xf}
 };
 
@@ -281,7 +302,7 @@
 {
 	int i;
 	ni_ksinfo_t *ksinfop;
-#ifdef VFALLS_IMPL
+#if defined(VFALLS_IMPL) || defined(KT_IMPL)
 	uint64_t stat, pcr;
 #endif
 
@@ -293,8 +314,8 @@
 	/*
 	 * Create DRAM perf events kstat
 	 */
-	for (i = 0; i < NIAGARA_DRAM_BANKS; i++) {
-#ifdef VFALLS_IMPL
+	for (i = 0; i < DRAM_BANKS; i++) {
+#if defined(VFALLS_IMPL) || defined(KT_IMPL)
 		/* check if this dram instance is enabled in the HW */
 		stat = hv_niagara_getperf(dram_perf_regs[i].pcr_reg, &pcr);
 		if ((stat != H_EINVAL) && (stat != H_ENOTSUPPORTED)) {
@@ -311,13 +332,13 @@
 			ksinfop->pic_no_evs =
 			    sizeof (niagara_dram_events) /
 			    sizeof (ni_kev_mask_t);
-			ksinfop->pic_sel_shift[0] = NIAGARA_DRAM_PIC0_SEL_SHIFT;
-			ksinfop->pic_shift[0] = NIAGARA_DRAM_PIC0_SHIFT;
-			ksinfop->pic_mask[0] = NIAGARA_DRAM_PIC0_MASK;
-			ksinfop->pic_sel_shift[1] = NIAGARA_DRAM_PIC1_SEL_SHIFT;
-			ksinfop->pic_shift[1] = NIAGARA_DRAM_PIC1_SHIFT;
-			ksinfop->pic_mask[1] = NIAGARA_DRAM_PIC1_MASK;
-			ksinfop->pic_reg[0] = dram_perf_regs[i].pic_reg;
+			ksinfop->pic_sel_shift[0] = DRAM_PIC0_SEL_SHIFT;
+			ksinfop->pic_shift[0] = DRAM_PIC0_SHIFT;
+			ksinfop->pic_mask[0] = DRAM_PIC0_MASK;
+			ksinfop->pic_sel_shift[1] = DRAM_PIC1_SEL_SHIFT;
+			ksinfop->pic_shift[1] = DRAM_PIC1_SHIFT;
+			ksinfop->pic_mask[1] = DRAM_PIC1_MASK;
+			ksinfop->pic_reg[0] = dram_perf_regs[i].pic_reg[0];
 			ksinfop->pcr_reg = dram_perf_regs[i].pcr_reg;
 			ni_dram_kstats[i] = ksinfop;
 
@@ -329,7 +350,7 @@
 			/* create counter kstats */
 			ni_dram_kstats[i]->cntr_ksp = ni_create_cntr_kstat(
 			    "dram", i, ni_cntr_kstat_update, ksinfop);
-#ifdef VFALLS_IMPL
+#if defined(VFALLS_IMPL) || defined(KT_IMPL)
 		}
 #endif
 	}
@@ -484,7 +505,7 @@
 		printf("ni_kstat_fini called\n");
 #endif
 
-	for (i = 0; i < NIAGARA_DRAM_BANKS; i++) {
+	for (i = 0; i < DRAM_BANKS; i++) {
 		if (ni_dram_kstats[i] != NULL) {
 			ni_delete_name_kstat(ni_dram_kstats[i]);
 			if (ni_dram_kstats[i]->cntr_ksp != NULL)
@@ -773,7 +794,6 @@
 		    hv_niagara_getperf(ksinfop->pcr_reg, &pcr) != 0)
 			stat = EACCES;
 		else {
-
 			data_p[0].value.ui64 = pcr;
 
 			/*
--- a/usr/src/uts/sun4v/io/ds_pri.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/ds_pri.c	Fri Dec 11 10:41:17 2009 -0800
@@ -23,7 +23,6 @@
  * Use is subject to license terms.
  */
 
-
 /*
  * sun4v domain services PRI driver
  */
@@ -44,7 +43,11 @@
 #include <sys/ddi.h>
 #include <sys/sunddi.h>
 #include <sys/ds.h>
-
+#include <sys/hypervisor_api.h>
+#include <sys/machsystm.h>
+#include <sys/sysmacros.h>
+#include <sys/hsvc.h>
+#include <sys/bitmap.h>
 #include <sys/ds_pri.h>
 
 static uint_t ds_pri_debug = 0;
@@ -85,8 +88,10 @@
 	uint8_t		data[1];
 } ds_pri_msg_t;
 
-	/* The following are bit field flags */
-	/* No service implies no PRI and no outstanding request */
+/*
+ * The following are bit field flags. No service implies no DS PRI and
+ * no outstanding request.
+ */
 typedef enum {
 	DS_PRI_NO_SERVICE = 0x0,
 	DS_PRI_HAS_SERVICE = 0x1,
@@ -117,6 +122,7 @@
 static void *ds_pri_statep;
 
 static void request_pri(ds_pri_state_t *sp);
+static uint64_t ds_get_hv_pri(ds_pri_state_t *sp);
 
 static int ds_pri_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
 static int ds_pri_attach(dev_info_t *, ddi_attach_cmd_t);
@@ -206,11 +212,26 @@
 	NULL
 };
 
+static boolean_t hsvc_pboot_available = B_FALSE;
+static hsvc_info_t pboot_hsvc = {
+	HSVC_REV_1, NULL, HSVC_GROUP_PBOOT, 1, 0, NULL
+};
 
 int
 _init(void)
 {
 	int retval;
+	uint64_t	hsvc_pboot_minor;
+	uint64_t	status;
+
+	status = hsvc_register(&pboot_hsvc, &hsvc_pboot_minor);
+	if (status == H_EOK) {
+		hsvc_pboot_available = B_TRUE;
+	} else {
+		DS_PRI_DBG("hypervisor services not negotiated "
+		    "for group number: 0x%lx errorno: 0x%lx\n",
+		    pboot_hsvc.hsvc_group, status);
+	}
 
 	retval = ddi_soft_state_init(&ds_pri_statep,
 	    sizeof (ds_pri_state_t), 0);
@@ -244,6 +265,8 @@
 
 	ddi_soft_state_fini(&ds_pri_statep);
 
+	(void) hsvc_unregister(&pboot_hsvc);
+
 	return (retval);
 }
 
@@ -286,6 +309,7 @@
 	int instance;
 	ds_pri_state_t *sp;
 	int rv;
+	uint64_t status;
 
 	switch (cmd) {
 	case DDI_ATTACH:
@@ -332,6 +356,16 @@
 	sp->req_id = 0;
 	sp->num_opens = 0;
 
+	/*
+	 * See if we can get the static hv pri data. Static pri data
+	 * is only available for privileged domains.
+	 */
+	if (hsvc_pboot_available == B_TRUE) {
+		if ((status = ds_get_hv_pri(sp)) != 0) {
+			cmn_err(CE_NOTE, "ds_get_hv_pri failed: 0x%lx", status);
+		}
+	}
+
 	if ((rv = ds_cap_init(&ds_pri_cap, &ds_pri_ops)) != 0) {
 		cmn_err(CE_NOTE, "ds_cap_init failed: %d", rv);
 		goto fail;
@@ -342,6 +376,8 @@
 	return (DDI_SUCCESS);
 
 fail:
+	if (sp->ds_pri)
+		kmem_free(sp->ds_pri, sp->ds_pri_len);
 	ddi_remove_minor_node(dip, NULL);
 	cv_destroy(&sp->cv);
 	mutex_destroy(&sp->lock);
@@ -410,28 +446,27 @@
 	mutex_enter(&sp->lock);
 
 	/*
-	 * If we're here and the state is DS_PRI_NO_SERVICE then this
-	 * means that ds hasn't yet called the registration callback.
+	 * Proceed if we have PRI data (possibly obtained from
+	 * static HV PRI or last pushed DS PRI data update).
+	 * If no PRI data and we have no DS PRI service then this
+	 * means that PRI DS has never called the registration callback.
 	 * A while loop is necessary as we might have been woken up
 	 * prematurely, e.g., due to a debugger or "pstack" etc.
 	 * Wait here and the callback will signal us when it has completed
 	 * its work.
 	 */
-	while (sp->state == DS_PRI_NO_SERVICE) {
-		if (cv_wait_sig(&sp->cv, &sp->lock) == 0) {
-			mutex_exit(&sp->lock);
-			return (EINTR);
+	if (!(sp->state & DS_PRI_HAS_PRI)) {
+		while (!(sp->state & DS_PRI_HAS_SERVICE)) {
+			if (cv_wait_sig(&sp->cv, &sp->lock) == 0) {
+				mutex_exit(&sp->lock);
+				return (EINTR);
+			}
 		}
 	}
 
 	sp->num_opens++;
 	mutex_exit(&sp->lock);
 
-	/*
-	 * On open we dont fetch the PRI even if we have a valid service
-	 * handle. PRI fetch is essentially lazy and on-demand.
-	 */
-
 	DS_PRI_DBG("ds_pri_open: state = 0x%x\n", sp->state);
 
 	return (0);
@@ -465,19 +500,6 @@
 		return (0);
 	}
 
-	/* If we have an old PRI - remove it */
-	if (sp->state & DS_PRI_HAS_PRI) {
-		if (sp->ds_pri != NULL && sp->ds_pri_len > 0) {
-			/*
-			 * remove the old data if we have an
-			 * outstanding request
-			 */
-			kmem_free(sp->ds_pri, sp->ds_pri_len);
-			sp->ds_pri_len = 0;
-			sp->ds_pri = NULL;
-		}
-		sp->state &= ~DS_PRI_HAS_PRI;
-	}
 	sp->state &= ~DS_PRI_REQUESTED;
 	mutex_exit(&sp->lock);
 	return (0);
@@ -715,22 +737,19 @@
 	DS_PRI_DBG("ds_pri_reg_handler: registering handle 0x%lx for version "
 	    "0x%x:0x%x\n", (uint64_t)hdl, ver->major, ver->minor);
 
-	/* When the domain service comes up automatically req the pri */
+	/* When the domain service comes up automatically update the state */
 	mutex_enter(&sp->lock);
 
 	ASSERT(sp->ds_pri_handle == DS_INVALID_HDL);
 	sp->ds_pri_handle = hdl;
 
-	ASSERT(sp->state == DS_PRI_NO_SERVICE);
-	ASSERT(sp->ds_pri == NULL);
-	ASSERT(sp->ds_pri_len == 0);
-
-	/* have service, but no PRI */
+	ASSERT(!(sp->state & DS_PRI_HAS_SERVICE));
 	sp->state |= DS_PRI_HAS_SERVICE;
 
 	/*
 	 * Cannot request a PRI here, because the reg handler cannot
 	 * do a DS send operation - we take care of this later.
+	 * Static hv pri data might be available.
 	 */
 
 	/* Wake up anyone waiting in open() */
@@ -755,14 +774,16 @@
 
 	mutex_enter(&sp->lock);
 
-	/* Once the service goes - if we have a PRI at hand free it up */
-	if (sp->ds_pri_len != 0) {
-		kmem_free(sp->ds_pri, sp->ds_pri_len);
-		sp->ds_pri_len = 0;
-		sp->ds_pri = NULL;
-	}
+	/*
+	 * Note that if the service goes offline, we don't
+	 * free up the current PRI data at hand. It is assumed
+	 * that PRI DS service will only push new update when
+	 * it comes online. We mark the state to indicate no
+	 * DS PRI service is available. The current PRI data if
+	 * available is provided to the consumers.
+	 */
 	sp->ds_pri_handle = DS_INVALID_HDL;
-	sp->state = DS_PRI_NO_SERVICE;
+	sp->state &= ~DS_PRI_HAS_SERVICE;
 
 	mutex_exit(&sp->lock);
 }
@@ -830,6 +851,10 @@
 	}
 
 	pri_size = buflen - sizeof (msgp->hdr);
+	if (pri_size == 0) {
+		cmn_err(CE_WARN, "Received DS pri data of size 0");
+		goto done;
+	}
 	data = kmem_alloc(pri_size, KM_SLEEP);
 	sp->ds_pri = data;
 	sp->ds_pri_len = pri_size;
@@ -843,3 +868,64 @@
 done:;
 	mutex_exit(&sp->lock);
 }
+
+/*
+ * Routine to get static PRI data from the Hypervisor.
+ * If successful, this PRI data is the last known PRI
+ * data generated since the last poweron reset.
+ */
+static uint64_t
+ds_get_hv_pri(ds_pri_state_t *sp)
+{
+	uint64_t	status;
+	uint64_t	pri_size;
+	uint64_t	buf_size;
+	uint64_t	buf_pa;
+	caddr_t		buf_va = NULL;
+	caddr_t		pri_data;
+
+	/*
+	 * Get pri buffer size by calling hcall with buffer size 0.
+	 */
+	pri_size = 0LL;
+	status = hv_mach_pri((uint64_t)0, &pri_size);
+	DS_PRI_DBG("ds_get_hv_pri: hv_mach_pri pri size: 0x%lx\n", pri_size);
+	if (status == H_ENOTSUPPORTED || status == H_ENOACCESS) {
+		DS_PRI_DBG("ds_get_hv_pri: hv_mach_pri service is not "
+		    "available. errorno: 0x%lx\n", status);
+		return (status);
+	}
+
+	if (pri_size == 0)
+		return (1);
+
+	/*
+	 * contig_mem_alloc requires size to be a power of 2.
+	 * Increase size to next power of 2 if necessary.
+	 */
+	buf_size = ((pri_size & (pri_size - 1)) == 0) ? pri_size :
+	    (1ULL << highbit(pri_size));
+	DS_PRI_DBG("ds_get_hv_pri: buf_size = 0x%lx\n", buf_size);
+
+	buf_va = contig_mem_alloc(buf_size);
+	if (buf_va == NULL)
+		return (1);
+
+	buf_pa = va_to_pa(buf_va);
+	DS_PRI_DBG("ds_get_hv_pri: buf_pa 0x%lx\n", buf_pa);
+	status = hv_mach_pri(buf_pa, &pri_size);
+	DS_PRI_DBG("ds_get_hv_pri: hv_mach_pri status = 0x%lx\n", status);
+
+	if (status == H_EOK) {
+		pri_data = kmem_alloc(pri_size, KM_SLEEP);
+		sp->ds_pri = pri_data;
+		sp->ds_pri_len = pri_size;
+		bcopy(buf_va, pri_data, sp->ds_pri_len);
+		sp->state |= DS_PRI_HAS_PRI;
+		sp->gencount++;
+	}
+
+	contig_mem_free(buf_va, buf_size);
+
+	return (status);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/ds_pri_hcall.s	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,59 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Hypervisor calls called by ds_pri driver.
+ */
+
+#include <sys/asm_linkage.h>
+#include <sys/hypervisor_api.h>
+
+#if defined(lint) || defined(__lint)
+
+/*ARGSUSED*/
+uint64_t
+hv_mach_pri(uint64_t buffer_ra, uint64_t *buffer_sizep)
+{ return (0); }
+
+#else	/* lint || __lint */
+
+	/*
+	 * MACH_PRI
+	 * arg0 buffer real address
+	 * arg1 pointer to uint64_t for size of buffer
+	 * ret0 status
+	 * ret1 return required size of buffer / returned data size
+	 */
+	ENTRY(hv_mach_pri)
+	mov	%o1, %o4		! save datap
+	ldx	[%o1], %o1
+	mov	HV_MACH_PRI, %o5
+	ta	FAST_TRAP
+	retl
+	stx	%o1, [%o4]
+	SET_SIZE(hv_mach_pri)
+
+#endif	/* lint || __lint */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/iospc.c	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,663 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * IO Performance Counter Driver
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include "iospc.h"
+
+/* Debugging level. */
+#ifdef DEBUG
+int iospc_debug = 0;
+#endif /* DEBUG */
+
+/* State structure anchor. */
+void *iospc_state_p;
+
+static int iospc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+static int iospc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+static int iospc_create_name_kstat(iospc_grp_t *grp);
+static void iospc_delete_name_kstats(kstat_t **name_kstats_pp,
+    int num_kstats);
+static kstat_t *iospc_create_cntr_kstat(char *name, int dev_inst,
+    int (*update)(kstat_t *, int), iospc_ksinfo_t *ksinfop, int num_pics);
+static int iospc_kstat_update(kstat_t *ksp, int rw);
+static kstat_t *iospc_create_picN_kstat(char *mod_name, int pic,
+    uint64_t mask, int num_ev, iospc_event_t *ev_array);
+
+iospc_grp_t **iospc_leaf_grps = NULL;
+int iospc_kstat_inited = 0;
+kmutex_t iospc_mutex;
+
+static struct dev_ops iospc_ops = {
+	DEVO_REV,
+	0,
+	nulldev,
+	nulldev,
+	nulldev,
+	iospc_attach,
+	iospc_detach,
+	nodev,
+	NULL,
+	NULL,
+	nodev
+};
+
+extern struct mod_ops mod_driverops;
+
+static struct modldrv md = {
+	&mod_driverops,
+	"IO Perf Counter Driver",
+	&iospc_ops,
+};
+
+static struct modlinkage ml = {
+	MODREV_1,
+	(void *)&md,
+	NULL
+};
+
+/*
+ * One-time module-wide initialization.
+ */
+int
+_init(void)
+{
+	int rval;
+
+	/* Initialize per-leaf soft state pointer. */
+	if ((rval = ddi_soft_state_init(&iospc_state_p,
+	    sizeof (iospc_t), 1)) != DDI_SUCCESS)
+		return (rval);
+	/* Attach may run as soon as mod_install returns; init the lock first. */
+	mutex_init(&iospc_mutex, NULL, MUTEX_DRIVER, NULL);
+	if ((rval = mod_install(&ml)) != DDI_SUCCESS) {
+		mutex_destroy(&iospc_mutex);
+		ddi_soft_state_fini(&iospc_state_p);
+		return (rval);
+	}
+	return (DDI_SUCCESS);
+}
+
+/*
+ * One-time module-wide cleanup, after last detach is done.
+ */
+int
+_fini(void)
+{
+	int rval;
+
+	/*
+	 * Remove the module first as this operation is the only thing here
+	 * which can fail.
+	 */
+	rval = mod_remove(&ml);
+	if (rval != DDI_SUCCESS)
+		return (rval);
+
+	if (iospc_leaf_grps != NULL) {
+		iospc_kstat_fini();
+		mutex_enter(&iospc_mutex);
+		iospc_kstat_inited = 0;
+		(void) rfios_unbind_group();
+		iospc_leaf_grps = NULL;
+		mutex_exit(&iospc_mutex);
+	}
+
+	mutex_destroy(&iospc_mutex);
+
+	/* Free px soft state */
+	ddi_soft_state_fini(&iospc_state_p);
+
+	return (DDI_SUCCESS);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+	return (mod_info(&ml, modinfop));
+}
+
+/*
+ * Per-instance initialization.  Suspend/resume not supported.
+ */
+static int
+iospc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+	iospc_t *iospc_p;
+	int instance = ddi_get_instance(dip);
+	char *ptr;
+
+	IOSPC_DBG2("iospc: iospc_attach: enter\n");
+	switch (cmd) {
+	case DDI_RESUME:
+	case DDI_ATTACH:
+		/* Initialize one-time kstat structures. */
+		mutex_enter(&iospc_mutex);
+		if (!iospc_kstat_inited) {
+			if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
+			    0, "compatible", &ptr)) != DDI_PROP_SUCCESS)
+				goto bad_property;
+
+			if ((strcmp(ptr, "SUNW,ktios-pr") == 0) ||
+			    (strcmp(ptr, "SUNW,rfios-pr") == 0)) {
+				iospc_leaf_grps = rfios_bind_group();
+			} else {
+				ddi_prop_free(ptr);
+				goto bad_property;
+			}
+
+			ddi_prop_free(ptr);
+
+			if (iospc_kstat_init() != DDI_SUCCESS)
+				goto bad_kstat_init;
+
+			iospc_kstat_inited++;
+		}
+		mutex_exit(&iospc_mutex);
+
+		if (ddi_soft_state_zalloc(iospc_state_p, instance) !=
+		    DDI_SUCCESS) {
+			goto bad_softstate;
+		}
+
+		iospc_p = (iospc_t *)ddi_get_soft_state(iospc_state_p,
+		    instance);
+
+		iospc_p->iospc_dip = dip;
+
+		/* Set up kstats. */
+
+		if (iospc_kstat_attach(iospc_p) != DDI_SUCCESS)
+			goto bad_kstat_attach;
+
+		IOSPC_DBG2("iospc: iospc_attach: exit SUCCESS\n");
+
+		return (DDI_SUCCESS);
+bad_kstat_attach:
+		(void) ddi_soft_state_free(iospc_state_p, instance);
+bad_softstate:
+		mutex_enter(&iospc_mutex);
+		iospc_kstat_fini();
+bad_kstat_init:
+bad_property:
+		mutex_exit(&iospc_mutex);
+		IOSPC_DBG2("iospc: iospc_attach: exit FAILURE\n");
+		return (DDI_FAILURE);
+
+	default:
+		return (DDI_FAILURE);
+	}
+}
+
+/*
+ * Per-instance cleanup.  Suspend/resume not supported.
+ */
+static int
+iospc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+	int instance = ddi_get_instance(dip);
+
+	IOSPC_DBG2("iospc: iospc_detach: enter\n");
+	iospc_t *iospc_p = (iospc_t *)ddi_get_soft_state(
+	    iospc_state_p, instance);
+
+	switch (cmd) {
+	case DDI_SUSPEND:
+	case DDI_DETACH:
+		iospc_kstat_detach(iospc_p);
+		(void) ddi_soft_state_free(iospc_state_p, instance);
+
+		IOSPC_DBG2("iospc: iospc_detach: exit - SUCCESS\n");
+		return (DDI_SUCCESS);
+
+	default:
+		IOSPC_DBG2("iospc: iospc_detach: exit - FAILURE\n");
+		return (DDI_FAILURE);
+	}
+}
+
+#define	PIC_STR_LEN	5	/* Size of a PICx name string. */
+
+/*
+ * One-time initialization for this module.
+ */
+int
+iospc_kstat_init()
+{
+	iospc_grp_t **grp_pp;
+	iospc_grp_t *grp_p;
+
+	IOSPC_DBG2("iospc: kstat_init: enter\n");
+
+	/*
+	 * Initialize the name kstats for each group, drawing upon the table
+	 * for values.
+	 */
+	for (grp_pp = iospc_leaf_grps; *grp_pp != NULL; grp_pp++) {
+
+		grp_p = *grp_pp;
+
+		IOSPC_DBG2("Setting up group for %s\n", grp_p->grp_name);
+
+		/* Create basic pic event-type pair. */
+		grp_p->name_kstats_pp = kmem_zalloc((grp_p->num_counters *
+		    sizeof (kstat_t)), KM_SLEEP);
+		if (iospc_create_name_kstat(grp_p) != DDI_SUCCESS) {
+			iospc_kstat_fini();
+			IOSPC_DBG1("iospc: init: failure exit\n");
+			return (DDI_FAILURE);
+		}
+	}
+
+	IOSPC_DBG2("iospc: kstat_init: success exit\n");
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Per-instance initialization for this module.
+ */
+int
+iospc_kstat_attach(iospc_t *iospc_p)
+{
+	iospc_grp_t **grp_pp;
+	iospc_grp_t *grp_p;
+	iospc_ksinfo_t *ksinfo_p;
+
+	int i;
+
+	IOSPC_DBG2("iospc: kstat_attach %d: enter\n",
+	    ddi_get_instance(iospc_p->iospc_dip));
+
+	/* Set up kstats for each group. */
+	for (i = 0, grp_pp = iospc_leaf_grps; *grp_pp != NULL; i++, grp_pp++) {
+
+		if (i >= IOSPC_MAX_NUM_GRPS)
+			goto err;
+
+		grp_p = *grp_pp;
+
+		/*
+		 * ksinfo_p keeps all info needed by iospc_kstat_update,
+		 * which is fired off asynchronously on demand by the kstat
+		 * framework.
+		 */
+		ksinfo_p = (iospc_ksinfo_t *)kmem_zalloc(
+		    sizeof (iospc_ksinfo_t), KM_SLEEP);
+
+		ksinfo_p->iospc_p = iospc_p;
+		ksinfo_p->grp_p  = grp_p;
+
+		/* Also save in state structure, for later cleanup. */
+		iospc_p->iospc_ksinfo_p[i] = ksinfo_p;
+
+		/* Create counter kstats */
+		ksinfo_p->cntr_ksp = iospc_create_cntr_kstat(grp_p->grp_name,
+		    ddi_get_instance(iospc_p->iospc_dip),
+		    iospc_kstat_update, ksinfo_p, grp_p->num_counters);
+
+		if (ksinfo_p->cntr_ksp == NULL)
+			goto err;
+
+		if (grp_p->access_init(iospc_p, ksinfo_p) != SUCCESS)
+			goto err;
+	}
+
+	IOSPC_DBG2("iospc: kstat_attach: success exit\n");
+	return (DDI_SUCCESS);
+err:
+	iospc_kstat_detach(iospc_p);
+	IOSPC_DBG2("iospc: kstat_attach: failure exit\n");
+	return (DDI_FAILURE);
+}
+
+/*
+ * Create the name kstats for each group.
+ */
+static int
+iospc_create_name_kstat(iospc_grp_t *grp_p)
+{
+	int i;
+
+	for (i = 0; i < grp_p->num_counters; i++) {
+		grp_p->name_kstats_pp[i] = iospc_create_picN_kstat(
+		    grp_p->grp_name, i,
+		    grp_p->regsel_p->fields_p[i].event_offset,
+		    grp_p->regsel_p->fields_p[i].num_events,
+		    grp_p->regsel_p->fields_p[i].events_p);
+
+		if (grp_p->name_kstats_pp[i] == NULL)
+			return (DDI_FAILURE);
+	}
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Create the picN kstat. Returns a pointer to the
+ * kstat which the driver must store to allow it
+ * to be deleted when necessary.
+ */
+static kstat_t *
+iospc_create_picN_kstat(char *mod_name, int pic, uint64_t ev_offset,
+    int num_ev, iospc_event_t *ev_array)
+{
+	int event;
+	char pic_name[PIC_STR_LEN];
+	kstat_t	*picN_ksp = NULL;
+	struct kstat_named *pic_named_data;
+
+	(void) snprintf(pic_name, PIC_STR_LEN, "pic%1d", pic);
+
+	if ((picN_ksp = kstat_create(mod_name, 0, pic_name,
+	    "bus", KSTAT_TYPE_NAMED, num_ev, NULL)) == NULL) {
+		return (NULL);
+	}
+
+	/* NOTE: Number of events is assumed to always be non-zero. */
+
+	pic_named_data = (struct kstat_named *)picN_ksp->ks_data;
+
+	/*
+	 * Fill up data section of the kstat
+	 * Write event names and their associated pcr masks.
+	 * num_ev - 1 is because CLEAR_PIC is added separately.
+	 */
+	for (event = 0; event < num_ev - 1; event++) {
+		pic_named_data[event].value.ui64 =
+		    ev_array[event].value << ev_offset;
+
+		kstat_named_init(&pic_named_data[event],
+		    ev_array[event].name, KSTAT_DATA_UINT64);
+	}
+
+	/*
+	 * add the clear_pic entry
+	 */
+	pic_named_data[event].value.ui64 =
+	    (uint64_t)~(ev_array[event].value << ev_offset);
+
+	kstat_named_init(&pic_named_data[event], ev_array[event].name,
+	    KSTAT_DATA_UINT64);
+
+	kstat_install(picN_ksp);
+
+	return (picN_ksp);
+}
+
+/*
+ * Create the "counters" kstat.
+ */
+static kstat_t *
+iospc_create_cntr_kstat(char *name, int dev_inst,
+    int (*update)(kstat_t *, int), iospc_ksinfo_t *ksinfop, int num_pics)
+{
+	int i;
+	char pic_str[PIC_STR_LEN];
+	struct kstat *counters_ksp;
+	struct kstat_named *counters_named_data;
+
+	IOSPC_DBG2("iospc_create_cntr_kstat: name: %s instance: %d\n",
+	    name, dev_inst);
+
+	/*
+	 * Size of kstat is num_pics + 1. extra one for pcr.
+	 */
+
+	if ((counters_ksp = kstat_create(name, dev_inst, "counters", "bus",
+	    KSTAT_TYPE_NAMED, num_pics + 1, KSTAT_FLAG_WRITABLE)) == NULL) {
+		return (NULL);
+	}
+
+	counters_named_data = (struct kstat_named *)(counters_ksp->ks_data);
+	kstat_named_init(&counters_named_data[0], "pcr", KSTAT_DATA_UINT64);
+
+	for (i = 0; i < num_pics; i++) {
+		(void) snprintf(pic_str, PIC_STR_LEN, "pic%1d", i);
+
+		kstat_named_init(&counters_named_data[i+1], pic_str,
+		    KSTAT_DATA_UINT64);
+	}
+
+	/*
+	 * Store the reg type and other info. in the kstat's private field
+	 * so that they are available to the update function.
+	 */
+	counters_ksp->ks_private = (void *)ksinfop;
+	counters_ksp->ks_update = update;
+
+	kstat_install(counters_ksp);
+
+	return (counters_ksp);
+}
+
+/*
+ * Program a performance counter.
+ *
+ * reggroup is which type of counter.
+ * counter is the counter number.
+ * event is the event to program for that counter.
+ */
+static int
+iospc_perfcnt_program(iospc_t *iospc_p, iospc_grp_t *grp_p,
+	iospc_ksinfo_t *ksinfo_p, uint64_t new_events)
+{
+	uint64_t old_events;
+	int rval = SUCCESS;
+	uint64_t event_mask;
+	int counter;
+
+	IOSPC_DBG1(
+	    "iospc_perfcnt_program enter: new_events:0x%" PRIx64 "\n",
+	    new_events);
+
+	if ((rval = grp_p->access(iospc_p, ksinfo_p->arg, IOSPC_REG_READ,
+	    grp_p->regsel_p->regoff, &old_events)) != SUCCESS)
+		goto done_pgm;
+
+	IOSPC_DBG1("  old_events:0x%" PRIx64 "\n", old_events);
+
+	for (counter = 0; counter < grp_p->num_counters; counter++) {
+
+		if (grp_p->counters_p[counter].zero_regoff == NO_REGISTER)
+			continue;
+
+		event_mask = grp_p->regsel_p->fields_p[counter].event_mask <<
+		    grp_p->regsel_p->fields_p[counter].event_offset;
+
+		IOSPC_DBG1(
+		    "grp:%s, counter:%d, zero_regoff:0x%lx, "
+		    "event_mask:0x%" PRIx64 ", old&mask:0x%lx, "
+		    "new&mask:0x%lx\n",
+		    grp_p->grp_name, counter,
+		    grp_p->counters_p[counter].zero_regoff,
+		    event_mask, old_events & event_mask,
+		    new_events & event_mask);
+
+		if ((old_events & event_mask) ==
+		    (new_events & event_mask))
+			continue;
+
+		IOSPC_DBG1("Zeroing counter %d\n", counter);
+
+		if ((rval = grp_p->access(iospc_p, ksinfo_p->arg,
+		    IOSPC_REG_WRITE, grp_p->counters_p[counter].zero_regoff,
+		    &grp_p->counters_p[counter].zero_value)) != SUCCESS)
+			goto done_pgm;
+	}
+
+	if (old_events != new_events) {
+
+		IOSPC_DBG1("old != new, setting event reg %ld to 0x%lx\n",
+		    grp_p->regsel_p->regoff, new_events);
+
+		if ((rval = grp_p->access(iospc_p, ksinfo_p->arg,
+		    IOSPC_REG_WRITE, grp_p->regsel_p->regoff, &new_events))
+		    != SUCCESS) {
+			IOSPC_DBG1(
+			    "Write of new event data failed, "
+			    "select reg offset: %ld\n",
+			    grp_p->regsel_p->regoff);
+			goto done_pgm;
+		}
+	}
+done_pgm:
+	IOSPC_DBG1("iospc_perfcnt_program: returning status %d.\n", rval);
+	return (rval);
+}
+
+/*
+ * kstat update function. Handles reads/writes
+ * from/to kstat.
+ */
+static int
+iospc_kstat_update(kstat_t *ksp, int rw)
+{
+	struct kstat_named *data_p;
+	int counter;
+	iospc_ksinfo_t *ksinfop = ksp->ks_private;
+	iospc_grp_t *grp_p = ksinfop->grp_p;
+	iospc_t *iospc_p = ksinfop->iospc_p;
+
+	data_p = (struct kstat_named *)ksp->ks_data;
+
+	if (rw == KSTAT_WRITE) {
+
+		IOSPC_DBG2("iospc_kstat_update: wr %ld\n",
+		    data_p[0].value.ui64);
+
+		/*
+		 * Fields without programmable events won't be zeroed as
+		 * iospc_perfcnt_program is what zeros them.
+		 */
+
+		/* This group has programmable events. */
+		if (grp_p->regsel_p->regoff != NO_REGISTER) {
+
+			IOSPC_DBG2("write: regoff has valid register\n");
+			if (iospc_perfcnt_program(iospc_p, grp_p, ksinfop,
+			    data_p[0].value.ui64) != SUCCESS)
+				return (EIO);
+		}
+
+	} else {	/* Read the event register and all of the counters. */
+
+		/* This group has programmable events. */
+		if (grp_p->regsel_p->regoff != NO_REGISTER) {
+
+			IOSPC_DBG2("read: regoff has valid register\n");
+
+			if (grp_p->access(iospc_p, ksinfop->arg, IOSPC_REG_READ,
+			    grp_p->regsel_p->regoff, &data_p[0].value.ui64)
+			    != SUCCESS)
+				return (EIO);
+		} else
+			data_p[0].value.ui64 = 0ull;
+
+		IOSPC_DBG2("iospc_kstat_update: rd event %lx\n",
+		    data_p[0].value.ui64);
+
+		for (counter = 0; counter < grp_p->num_counters; counter++) {
+
+			if (grp_p->access(iospc_p, ksinfop->arg, IOSPC_REG_READ,
+			    grp_p->counters_p[counter].regoff,
+			    &data_p[counter + 1].value.ui64) != SUCCESS)
+				return (EIO);
+
+			IOSPC_DBG2("cntr%d, off:0x%lx, val:0x%lx\n", counter,
+			    grp_p->counters_p[counter].regoff,
+			    data_p[counter + 1].value.ui64);
+		}
+	}
+	return (SUCCESS);
+}
+
+void
+iospc_kstat_fini()
+{
+	iospc_grp_t **grp_pp;
+	iospc_grp_t *grp_p;
+	int j;
+
+	IOSPC_DBG2("iospc_kstat_fini called\n");
+
+	for (j = 0, grp_pp = iospc_leaf_grps; *grp_pp != NULL; j++, grp_pp++) {
+		grp_p = *grp_pp;
+		if (grp_p->name_kstats_pp != NULL) {
+			iospc_delete_name_kstats(grp_p->name_kstats_pp,
+			    grp_p->num_counters);
+			kmem_free(grp_p->name_kstats_pp,
+			    grp_p->num_counters * sizeof (kstat_t));
+			grp_p->name_kstats_pp = NULL;
+		}
+	}
+}
+
+static void
+iospc_delete_name_kstats(kstat_t **name_kstats_pp, int num_kstats)
+{
+	int i;
+
+	if (name_kstats_pp != NULL) {
+		for (i = 0; i < num_kstats; i++) {
+			if (name_kstats_pp[i] != NULL)
+				kstat_delete(name_kstats_pp[i]);
+		}
+	}
+}
+
+void
+iospc_kstat_detach(iospc_t *iospc_p)
+{
+	iospc_grp_t **grp_pp;
+	iospc_grp_t *grp_p;
+	int i;
+
+	IOSPC_DBG2("iospc_kstat_detach called\n");
+
+	for (i = 0, grp_pp = iospc_leaf_grps; *grp_pp != NULL; i++, grp_pp++) {
+
+		if (i >= IOSPC_MAX_NUM_GRPS)
+			return;
+
+		grp_p = *grp_pp;
+		if (iospc_p->iospc_ksinfo_p[i] != NULL) {
+
+			grp_p->access_fini(iospc_p, iospc_p->iospc_ksinfo_p[i]);
+
+			if (iospc_p->iospc_ksinfo_p[i]->cntr_ksp != NULL)
+				kstat_delete(
+				    iospc_p->iospc_ksinfo_p[i]->cntr_ksp);
+			kmem_free(iospc_p->iospc_ksinfo_p[i],
+			    sizeof (iospc_ksinfo_t));
+		}
+
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/iospc.conf	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,27 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+
+ddi-forceattach=1;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/iospc.h	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,153 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_IOSPC_H
+#define	_IOSPC_H
+
+/*
+ * Definitions which deal with things other than registers.
+ */
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/sunddi.h>
+
+#define	SUCCESS	0
+#define	FAILURE	-1
+
+#define	NAMEINST(dip)	ddi_driver_name(dip), ddi_get_instance(dip)
+
+/* Used for data structure retrieval during kstat update. */
+typedef struct iospc_ksinfo {
+	kstat_t		*cntr_ksp;
+	struct iospc	*iospc_p;
+	struct iospc_grp *grp_p;
+	void		*arg;
+} iospc_ksinfo_t;
+
+#define	IOSPC_MAX_NUM_GRPS	10
+
+/* State structure. */
+typedef struct iospc {
+	dev_info_t	*iospc_dip;
+	iospc_ksinfo_t	*iospc_ksinfo_p[IOSPC_MAX_NUM_GRPS];
+} iospc_t;
+
+/*
+ * Description of a counter's events.  Each counter will have an array of these,
+ * to define the events it can be programmed to report.  Nonprogrammable
+ * counters still need an array of these, to contain the name busstat will
+ * display for it, and a CLEAR_PIC entry.
+ */
+typedef struct iospc_event {
+	char *name;
+	uint64_t value;
+} iospc_event_t;
+
+#define	NUM_EVTS(x)	(sizeof (x) / sizeof (iospc_event_t))
+
+/*
+ * Counter description, including its access logistics and how to zero it.
+ */
+typedef struct iospc_cntr {
+	off_t regoff;		/* Register offset or address. */
+	uint64_t fld_mask;	/* Width of the active part of the register */
+	off_t zero_regoff;	/* Offset of register used to zero counter. */
+	uint64_t zero_value;	/* Value to write to zero_regoff, to clr cntr */
+} iospc_cntr_t;
+
+#define	FULL64BIT	-1ULL   /* Can use this for fld_mask. */
+
+#define	NUM_CTRS(x)	(sizeof (x) / sizeof (iospc_cntr_t))
+
+/*
+ * Description of a counter's event selection.  There will be one entry for
+ * each counter in the group.
+ */
+typedef struct iospc_regsel_fld {
+	iospc_event_t *events_p;
+	int num_events;		/* Size of events array. */
+	uint64_t event_mask;	/* Width of the event field. */
+	int event_offset;	/* Offset of the event field. */
+} iospc_regsel_fld_t;
+
+/*
+ * Description of a group's select register.
+ */
+typedef struct iospc_regsel {
+	off_t regoff;			/* Register offset or address. */
+	iospc_regsel_fld_t *fields_p;	/* select reg subfield descriptions.  */
+	int num_fields;			/* Size of the fields array. */
+} iospc_regsel_t;
+
+#define	NUM_FLDS(x)	(sizeof (x) / sizeof (iospc_regsel_fld_t))
+
+#define	IOSPC_REG_READ	0
+#define	IOSPC_REG_WRITE	1
+
+/* Standin symbol for when there is no register. */
+#define	NO_REGISTER	(off_t)-1ULL
+
+/*
+ * Group description.
+ */
+typedef struct iospc_grp {
+	char *grp_name;		 /* Name, shows up as busstat "module" name. */
+	iospc_regsel_t *regsel_p; /* Select register. */
+	iospc_cntr_t *counters_p; /* Counter definitions. */
+	int num_counters;	 /* Size of the counters array. */
+	int (*access_init)(iospc_t *iospc_p, iospc_ksinfo_t *ksinfo_p);
+	int (*access)(iospc_t *iospc_p, void *, int op, int regid,
+	    uint64_t *data);
+	int (*access_fini)(iospc_t *iospc_p, iospc_ksinfo_t *ksinfo_p);
+	kstat_t **name_kstats_pp; /* Named kstats.  One for all instances. */
+} iospc_grp_t;
+
+/* Debugging facility. */
+#ifdef DEBUG
+extern int iospc_debug;
+#define	IOSPC_DBG1 if (iospc_debug >= 1) printf
+#define	IOSPC_DBG2 if (iospc_debug >= 2) printf
+#else
+#define	IOSPC_DBG1 0 &&
+#define	IOSPC_DBG2 0 &&
+#endif	/* DEBUG */
+
+/* Function definitions exported among different modules. */
+extern int iospc_kstat_init(void);
+extern void iospc_kstat_fini(void);
+extern int iospc_kstat_attach(iospc_t *iospc_p);
+extern void iospc_kstat_detach(iospc_t *iospc_p);
+extern iospc_grp_t **rfios_bind_group(void);
+extern void rfios_unbind_group(void);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _IOSPC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/rfios_acc.h	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,90 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_RFIOS_ACC_H
+#define	_RFIOS_ACC_H
+
+/*
+ * Hypervisor and function definitions needed to access the device.
+ * Defined by FWARC 2008/613.
+ */
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#ifndef _ASM
+
+#include <sys/types.h>
+#include <sys/hypervisor_api.h>
+
+typedef uint64_t cntr_handle_t;
+
+extern int rfiospc_get_perfreg(cntr_handle_t handle, int regid, uint64_t *data);
+extern int rfiospc_set_perfreg(cntr_handle_t handle, int regid, uint64_t data);
+
+#endif /* _ASM */
+
+/*
+ * RF IOS API hypervisor group number.
+ */
+#define	RF_PERF_COUNTER_GROUP_ID	0x020a
+
+/*
+ * RF IOS performance counter fasttraps.
+ */
+
+#define	RFIOS_GET_PERFREG	0x165
+#define	RFIOS_SET_PERFREG	0x166
+
+/*
+ * Performance counter register definitions.
+ */
+
+#define	HVIO_RFIOS_PERFREG_PEX_SEL	0
+#define	HVIO_RFIOS_PERFREG_PEX_CNT0	1
+#define	HVIO_RFIOS_PERFREG_PEX_CNT1	2
+#define	HVIO_RFIOS_PERFREG_ATU_SEL	3
+#define	HVIO_RFIOS_PERFREG_ATU_CNT0	4
+#define	HVIO_RFIOS_PERFREG_ATU_CNT1	5
+#define	HVIO_RFIOS_PERFREG_IMU_SEL	6
+#define	HVIO_RFIOS_PERFREG_IMU_CNT0	7
+#define	HVIO_RFIOS_PERFREG_IMU_CNT1	8
+#define	HVIO_RFIOS_PERFREG_NPU_SEL	9
+#define	HVIO_RFIOS_PERFREG_NPU_CNT0	10
+#define	HVIO_RFIOS_PERFREG_NPU_CNT1	11
+#define	HVIO_RFIOS_PERFREG_PEU0_SEL	12
+#define	HVIO_RFIOS_PERFREG_PEU0_CNT0	13
+#define	HVIO_RFIOS_PERFREG_PEU0_CNT1	14
+#define	HVIO_RFIOS_PERFREG_PEU1_SEL	15
+#define	HVIO_RFIOS_PERFREG_PEU1_CNT0	16
+#define	HVIO_RFIOS_PERFREG_PEU1_CNT1	17
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _RFIOS_ACC_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/rfios_asm.s	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,67 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Assembly language support for the iospc IO performance counter driver.
+ */
+
+#include <sys/asm_linkage.h>
+#include <sys/hypervisor_api.h>
+#include "rfios_acc.h"
+
+/*LINTLIBRARY*/
+
+#if defined(lint)
+
+/*ARGSUSED*/
+int
+rfiospc_get_perfreg(cntr_handle_t handle, int regid, uint64_t *data)
+{ return (0); }
+
+/*ARGSUSED*/
+int
+rfiospc_set_perfreg(cntr_handle_t handle, int regid, uint64_t data)
+{ return (0); }
+
+#else /* lint */
+
+	ENTRY(rfiospc_get_perfreg)
+	mov	RFIOS_GET_PERFREG, %o5
+	ta	FAST_TRAP
+	brz,a	%o0, 1f
+	stx	%o1, [%o2]
+1:	retl
+	nop
+	SET_SIZE(rfiospc_get_perfreg)
+
+	ENTRY(rfiospc_set_perfreg)
+	mov	RFIOS_SET_PERFREG, %o5
+	ta	FAST_TRAP
+	retl
+	nop
+	SET_SIZE(rfiospc_set_perfreg)
+
+#endif /* lint */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/rfios_iospc.c	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,136 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/kmem.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/disp.h>
+#include <sys/stat.h>
+#include <sys/pci.h>
+#include <sys/hsvc.h>
+#include "iospc.h"
+#include "rfios_acc.h"
+#include "rfios_tables.h"
+
+extern iospc_grp_t *rfios_leaf_grps[];
+
+#define	RF_REQ_MAJOR_VER	1
+#define	RF_REQ_MINOR_VER	0
+
+static hsvc_info_t rfios_hsvc = {
+	HSVC_REV_1,
+	NULL,
+	RF_PERF_COUNTER_GROUP_ID,
+	RF_REQ_MAJOR_VER,
+	RF_REQ_MINOR_VER,
+	MODULE_NAME	/* Passed in as a #define from Makefile */
+};
+
+static uint64_t rfios_sup_minor;
+
+iospc_grp_t **
+rfios_bind_group(void)
+{
+	int rval;
+
+	if ((rval = hsvc_register(&rfios_hsvc, &rfios_sup_minor)) !=
+	    DDI_SUCCESS) {
+		IOSPC_DBG1("%s: Could not hsvc_register: %d\n",
+		    MODULE_NAME, rval);
+
+		return (NULL);
+	}
+
+	return ((iospc_grp_t **)&rfios_leaf_grps);
+}
+
+void
+rfios_unbind_group(void)
+{
+	(void) hsvc_unregister(&rfios_hsvc);
+}
+
+int
+rfios_access_init(iospc_t *iospc_p, iospc_ksinfo_t *ksinfo_p)
+{
+	uint32_t regprop[4];
+	int len;
+	cntr_handle_t   iospc_handle;
+
+	IOSPC_DBG2("rfios_access_init: iospc_p=%p\n", (void *)iospc_p);
+
+	len = sizeof (regprop);
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, iospc_p->iospc_dip,
+	    DDI_PROP_DONTPASS, "reg", (caddr_t)regprop, &len) !=
+	    DDI_SUCCESS) {
+		return (FAILURE);
+	}
+
+	iospc_handle = (regprop[0] & 0xfffffff);
+	ksinfo_p->arg = (void *)iospc_handle;
+
+	return (SUCCESS);
+
+}
+
+int
+rfios_access_fini(iospc_t *iospc_p, iospc_ksinfo_t *ksinfo_p)
+{
+	IOSPC_DBG2("rfios_access_fini: iospc_p=%p ksinfo_p=%p\n",
+	    (void *)iospc_p, (void *)ksinfo_p);
+	return (SUCCESS);
+}
+
+int
+rfios_access_hv(iospc_t *iospc_p, void *arg, int op, int regid, uint64_t *data)
+{
+	cntr_handle_t   iospc_handle = (cntr_handle_t)arg;
+
+	if (op == IOSPC_REG_READ) {
+		if (rfiospc_get_perfreg(iospc_handle, regid, data) != H_EOK) {
+			IOSPC_DBG2("rfios_access_hv: READ handle=%p regid=%x "
+			    "- Failed\n", (void *)iospc_p, regid);
+			return (FAILURE);
+		}
+
+		IOSPC_DBG2("rfios_access_hv: READ handle=%p regid=%x "
+		    "data=%lx\n", (void *)iospc_p, regid, *data);
+
+	} else { /* IOSPC_REG_WRITE */
+		if (rfiospc_set_perfreg(iospc_handle, regid, *data) != H_EOK) {
+			IOSPC_DBG2("rfios_access_hv: WRITE handle=%p regid=%x "
+			    "- Failed\n", (void *)iospc_p, regid);
+			return (FAILURE);
+		}
+
+		IOSPC_DBG2("rfios_access_hv: WRITE handle=%p regid=%x "
+		    "data=%lx\n", (void *)iospc_p, regid, *data);
+	}
+
+	return (SUCCESS);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/rfios_tables.c	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,856 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * RF IOS tables and functions to drive the IO performance counter driver.
+ *
+ * Please see rfiospc-tables.h for an explanation of how the table is put
+ * together.
+ */
+
+#include <sys/types.h>
+#include <sys/kstat.h>
+#include <sys/sunndi.h>
+#include <sys/pci.h>
+#include <sys/hsvc.h>
+#include "iospc.h"
+#include "rfios_acc.h"
+#include "rfios_tables.h"
+
+static iospc_event_t rfios_imu_ctr_0_evts[] = {
+	{ RFIOS_IMU01_S_EVT_NONE,
+	    RFIOS_IMU01_EVT_NONE },
+	{ RFIOS_IMU01_S_EVT_CLK,
+	    RFIOS_IMU01_EVT_CLK },
+	{ RFIOS_IMU01_S_EVT_TOTAL_MSIX,
+	    RFIOS_IMU01_EVT_TOTAL_MSIX },
+	{ RFIOS_IMU01_S_EVT_IOS_MSI,
+	    RFIOS_IMU01_EVT_IOS_MSI },
+	{ RFIOS_IMU01_S_EVT_PCIE_MSIX,
+	    RFIOS_IMU01_EVT_PCIE_MSIX },
+	{ RFIOS_IMU01_S_EVT_PCIE_MSGS,
+	    RFIOS_IMU01_EVT_PCIE_MSGS },
+	{ RFIOS_IMU01_S_EVT_FILTERED_MSIX,
+	    RFIOS_IMU01_EVT_FILTERED_MSIX },
+	{ RFIOS_IMU01_S_EVT_EQ_WR,
+	    RFIOS_IMU01_EVT_EQ_WR },
+	{ RFIOS_IMU01_S_EVT_MONDOS,
+	    RFIOS_IMU01_EVT_MONDOS },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_IMU_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_imu_ctr_1_evts[] = {
+	{ RFIOS_IMU01_S_EVT_NONE,
+	    RFIOS_IMU01_EVT_NONE },
+	{ RFIOS_IMU01_S_EVT_CLK,
+	    RFIOS_IMU01_EVT_CLK },
+	{ RFIOS_IMU01_S_EVT_TOTAL_MSIX,
+	    RFIOS_IMU01_EVT_TOTAL_MSIX },
+	{ RFIOS_IMU01_S_EVT_IOS_MSI,
+	    RFIOS_IMU01_EVT_IOS_MSI },
+	{ RFIOS_IMU01_S_EVT_PCIE_MSIX,
+	    RFIOS_IMU01_EVT_PCIE_MSIX },
+	{ RFIOS_IMU01_S_EVT_PCIE_MSGS,
+	    RFIOS_IMU01_EVT_PCIE_MSGS },
+	{ RFIOS_IMU01_S_EVT_FILTERED_MSIX,
+	    RFIOS_IMU01_EVT_FILTERED_MSIX },
+	{ RFIOS_IMU01_S_EVT_EQ_WR,
+	    RFIOS_IMU01_EVT_EQ_WR },
+	{ RFIOS_IMU01_S_EVT_MONDOS,
+	    RFIOS_IMU01_EVT_MONDOS },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_IMU_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_atu_ctr_0_evts[] = {
+	{ RFIOS_ATU01_S_EVT_NONE,
+	    RFIOS_ATU01_EVT_NONE },
+	{ RFIOS_ATU01_S_EVT_CLK,
+	    RFIOS_ATU01_EVT_CLK },
+	{ RFIOS_ATU01_S_EVT_FLOW_CTRL_STALL,
+	    RFIOS_ATU01_EVT_FLOW_CTRL_STALL },
+	{ RFIOS_ATU01_S_EVT_CLUMP_ACC,
+	    RFIOS_ATU01_EVT_CLUMP_ACC },
+	{ RFIOS_ATU01_S_EVT_CLUMP_MISS,
+	    RFIOS_ATU01_EVT_CLUMP_MISS },
+	{ RFIOS_ATU01_S_EVT_CLUMP_RESETS,
+	    RFIOS_ATU01_EVT_CLUMP_RESETS },
+	{ RFIOS_ATU01_S_EVT_CLUMP_TBL_WALK,
+	    RFIOS_ATU01_EVT_CLUMP_TBL_WALK },
+	{ RFIOS_ATU01_S_EVT_VIRT_ACC,
+	    RFIOS_ATU01_EVT_VIRT_ACC },
+	{ RFIOS_ATU01_S_EVT_VIRT_MISS,
+	    RFIOS_ATU01_EVT_VIRT_MISS },
+	{ RFIOS_ATU01_S_EVT_VIRT_RESETS,
+	    RFIOS_ATU01_EVT_VIRT_RESETS },
+	{ RFIOS_ATU01_S_EVT_VIRT_TBL_WALK,
+	    RFIOS_ATU01_EVT_VIRT_TBL_WALK },
+	{ RFIOS_ATU01_S_EVT_REAL_ACC,
+	    RFIOS_ATU01_EVT_REAL_ACC },
+	{ RFIOS_ATU01_S_EVT_REAL_MISS,
+	    RFIOS_ATU01_EVT_REAL_MISS },
+	{ RFIOS_ATU01_S_EVT_REAL_RESETS,
+	    RFIOS_ATU01_EVT_REAL_RESETS },
+	{ RFIOS_ATU01_S_EVT_REAL_TBL_WALK,
+	    RFIOS_ATU01_EVT_REAL_TBL_WALK },
+	{ RFIOS_ATU01_S_EVT_CMD_ERRORS,
+	    RFIOS_ATU01_EVT_CMD_ERRORS },
+	{ RFIOS_ATU01_S_EVT_VIRT_TRANS,
+	    RFIOS_ATU01_EVT_VIRT_TRANS },
+	{ RFIOS_ATU01_S_EVT_REAL_TRANS,
+	    RFIOS_ATU01_EVT_REAL_TRANS },
+	{ RFIOS_ATU01_S_EVT_PHYS_TRANS,
+	    RFIOS_ATU01_EVT_PHYS_TRANS },
+	{ RFIOS_ATU01_S_EVT_STRICT_ORDER_FORCED,
+	    RFIOS_ATU01_EVT_STRICT_ORDER_FORCED },
+	{ RFIOS_ATU01_S_EVT_RELAX_ORDER_FORCED,
+	    RFIOS_ATU01_EVT_RELAX_ORDER_FORCED },
+	{ RFIOS_ATU01_S_EVT_RELAX_ORDER_TLP,
+	    RFIOS_ATU01_EVT_RELAX_ORDER_TLP },
+	{ RFIOS_ATU01_S_EVT_RELAX_ORDER_TOTAL,
+	    RFIOS_ATU01_EVT_RELAX_ORDER_TOTAL },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_ATU_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_atu_ctr_1_evts[] = {
+	{ RFIOS_ATU01_S_EVT_NONE,
+	    RFIOS_ATU01_EVT_NONE },
+	{ RFIOS_ATU01_S_EVT_CLK,
+	    RFIOS_ATU01_EVT_CLK },
+	{ RFIOS_ATU01_S_EVT_FLOW_CTRL_STALL,
+	    RFIOS_ATU01_EVT_FLOW_CTRL_STALL },
+	{ RFIOS_ATU01_S_EVT_CLUMP_ACC,
+	    RFIOS_ATU01_EVT_CLUMP_ACC },
+	{ RFIOS_ATU01_S_EVT_CLUMP_MISS,
+	    RFIOS_ATU01_EVT_CLUMP_MISS },
+	{ RFIOS_ATU01_S_EVT_CLUMP_RESETS,
+	    RFIOS_ATU01_EVT_CLUMP_RESETS },
+	{ RFIOS_ATU01_S_EVT_CLUMP_TBL_WALK,
+	    RFIOS_ATU01_EVT_CLUMP_TBL_WALK },
+	{ RFIOS_ATU01_S_EVT_VIRT_ACC,
+	    RFIOS_ATU01_EVT_VIRT_ACC },
+	{ RFIOS_ATU01_S_EVT_VIRT_MISS,
+	    RFIOS_ATU01_EVT_VIRT_MISS },
+	{ RFIOS_ATU01_S_EVT_VIRT_RESETS,
+	    RFIOS_ATU01_EVT_VIRT_RESETS },
+	{ RFIOS_ATU01_S_EVT_VIRT_TBL_WALK,
+	    RFIOS_ATU01_EVT_VIRT_TBL_WALK },
+	{ RFIOS_ATU01_S_EVT_REAL_ACC,
+	    RFIOS_ATU01_EVT_REAL_ACC },
+	{ RFIOS_ATU01_S_EVT_REAL_MISS,
+	    RFIOS_ATU01_EVT_REAL_MISS },
+	{ RFIOS_ATU01_S_EVT_REAL_RESETS,
+	    RFIOS_ATU01_EVT_REAL_RESETS },
+	{ RFIOS_ATU01_S_EVT_REAL_TBL_WALK,
+	    RFIOS_ATU01_EVT_REAL_TBL_WALK },
+	{ RFIOS_ATU01_S_EVT_CMD_ERRORS,
+	    RFIOS_ATU01_EVT_CMD_ERRORS },
+	{ RFIOS_ATU01_S_EVT_VIRT_TRANS,
+	    RFIOS_ATU01_EVT_VIRT_TRANS },
+	{ RFIOS_ATU01_S_EVT_REAL_TRANS,
+	    RFIOS_ATU01_EVT_REAL_TRANS },
+	{ RFIOS_ATU01_S_EVT_PHYS_TRANS,
+	    RFIOS_ATU01_EVT_PHYS_TRANS },
+	{ RFIOS_ATU01_S_EVT_STRICT_ORDER_FORCED,
+	    RFIOS_ATU01_EVT_STRICT_ORDER_FORCED },
+	{ RFIOS_ATU01_S_EVT_RELAX_ORDER_FORCED,
+	    RFIOS_ATU01_EVT_RELAX_ORDER_FORCED },
+	{ RFIOS_ATU01_S_EVT_RELAX_ORDER_TLP,
+	    RFIOS_ATU01_EVT_RELAX_ORDER_TLP },
+	{ RFIOS_ATU01_S_EVT_RELAX_ORDER_TOTAL,
+	    RFIOS_ATU01_EVT_RELAX_ORDER_TOTAL },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_ATU_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_npu_ctr_0_evts[] = {
+	{ RFIOS_NPU01_S_EVT_NONE,
+	    RFIOS_NPU01_EVT_NONE },
+	{ RFIOS_NPU01_S_EVT_CLK,
+	    RFIOS_NPU01_EVT_CLK },
+	{ RFIOS_NPU01_S_EVT_ZERO_BYTE_READ,
+	    RFIOS_NPU01_EVT_ZERO_BYTE_READ },
+	{ RFIOS_NPU01_S_EVT_DMA_WRITE_LATENCY,
+	    RFIOS_NPU01_EVT_DMA_WRITE_LATENCY },
+	{ RFIOS_NPU01_S_EVT_DMA_WRITE_LATENCY_NUM,
+	    RFIOS_NPU01_EVT_DMA_WRITE_LATENCY_NUM },
+	{ RFIOS_NPU01_S_EVT_OSB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_OSB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_DMA_READ_LATENCY,
+	    RFIOS_NPU01_EVT_DMA_READ_LATENCY },
+	{ RFIOS_NPU01_S_EVT_DMA_READ_LATENCY_NUM,
+	    RFIOS_NPU01_EVT_DMA_READ_LATENCY_NUM },
+	{ RFIOS_NPU01_S_EVT_PSB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_PSB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_ICB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_ICB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_ECB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_ECB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_ATU_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_ATU_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_MEM_WRITES,
+	    RFIOS_NPU01_EVT_ATU_CSR_MEM_WRITES },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_MEM_READS,
+	    RFIOS_NPU01_EVT_ATU_CSR_MEM_READS },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_IMU_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_IMU_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_MEM_WRITES,
+	    RFIOS_NPU01_EVT_IMU_CSR_MEM_WRITES },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_MEM_READS,
+	    RFIOS_NPU01_EVT_IMU_CSR_MEM_READS },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_NPU_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_NPU_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_MEM_WRITES,
+	    RFIOS_NPU01_EVT_NPU_CSR_MEM_WRITES },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_MEM_READS,
+	    RFIOS_NPU01_EVT_NPU_CSR_MEM_READS },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_OTHER_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_OTHER_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM64_WRITES,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM64_WRITES },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM64_READS,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM64_READS },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM32_WRITES,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM32_WRITES },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM32_READS,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM32_READS },
+	{ RFIOS_NPU01_S_EVT_IO_SPACE_WRITES,
+	    RFIOS_NPU01_EVT_IO_SPACE_WRITES },
+	{ RFIOS_NPU01_S_EVT_IO_SPACE_READS,
+	    RFIOS_NPU01_EVT_IO_SPACE_READS },
+	{ RFIOS_NPU01_S_EVT_TOTAL_MSI,
+	    RFIOS_NPU01_EVT_TOTAL_MSI },
+	{ RFIOS_NPU01_S_EVT_ATU_MSI,
+	    RFIOS_NPU01_EVT_ATU_MSI },
+	{ RFIOS_NPU01_S_EVT_IMU_MSI,
+	    RFIOS_NPU01_EVT_IMU_MSI },
+	{ RFIOS_NPU01_S_EVT_NPU_MSI,
+	    RFIOS_NPU01_EVT_NPU_MSI },
+	{ RFIOS_NPU01_S_EVT_RETIRED_TAGS_CTO,
+	    RFIOS_NPU01_EVT_RETIRED_TAGS_CTO },
+	{ RFIOS_NPU01_S_EVT_NO_POSTED_TAGS_CYCYLES,
+	    RFIOS_NPU01_EVT_NO_POSTED_TAGS_CYCYLES },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_NPU_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_npu_ctr_1_evts[] = {
+	{ RFIOS_NPU01_S_EVT_NONE,
+	    RFIOS_NPU01_EVT_NONE },
+	{ RFIOS_NPU01_S_EVT_CLK,
+	    RFIOS_NPU01_EVT_CLK },
+	{ RFIOS_NPU01_S_EVT_ZERO_BYTE_READ,
+	    RFIOS_NPU01_EVT_ZERO_BYTE_READ },
+	{ RFIOS_NPU01_S_EVT_DMA_WRITE_LATENCY,
+	    RFIOS_NPU01_EVT_DMA_WRITE_LATENCY },
+	{ RFIOS_NPU01_S_EVT_DMA_WRITE_LATENCY_NUM,
+	    RFIOS_NPU01_EVT_DMA_WRITE_LATENCY_NUM },
+	{ RFIOS_NPU01_S_EVT_OSB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_OSB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_DMA_READ_LATENCY,
+	    RFIOS_NPU01_EVT_DMA_READ_LATENCY },
+	{ RFIOS_NPU01_S_EVT_DMA_READ_LATENCY_NUM,
+	    RFIOS_NPU01_EVT_DMA_READ_LATENCY_NUM },
+	{ RFIOS_NPU01_S_EVT_PSB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_PSB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_ICB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_ICB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_ECB_FULL_CYCLES,
+	    RFIOS_NPU01_EVT_ECB_FULL_CYCLES },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_ATU_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_ATU_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_MEM_WRITES,
+	    RFIOS_NPU01_EVT_ATU_CSR_MEM_WRITES },
+	{ RFIOS_NPU01_S_EVT_ATU_CSR_MEM_READS,
+	    RFIOS_NPU01_EVT_ATU_CSR_MEM_READS },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_IMU_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_IMU_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_MEM_WRITES,
+	    RFIOS_NPU01_EVT_IMU_CSR_MEM_WRITES },
+	{ RFIOS_NPU01_S_EVT_IMU_CSR_MEM_READS,
+	    RFIOS_NPU01_EVT_IMU_CSR_MEM_READS },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_NPU_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_NPU_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_MEM_WRITES,
+	    RFIOS_NPU01_EVT_NPU_CSR_MEM_WRITES },
+	{ RFIOS_NPU01_S_EVT_NPU_CSR_MEM_READS,
+	    RFIOS_NPU01_EVT_NPU_CSR_MEM_READS },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_CFG_WRITES,
+	    RFIOS_NPU01_EVT_OTHER_CSR_CFG_WRITES },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_CFG_READS,
+	    RFIOS_NPU01_EVT_OTHER_CSR_CFG_READS },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM64_WRITES,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM64_WRITES },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM64_READS,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM64_READS },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM32_WRITES,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM32_WRITES },
+	{ RFIOS_NPU01_S_EVT_OTHER_CSR_MEM32_READS,
+	    RFIOS_NPU01_EVT_OTHER_CSR_MEM32_READS },
+	{ RFIOS_NPU01_S_EVT_IO_SPACE_WRITES,
+	    RFIOS_NPU01_EVT_IO_SPACE_WRITES },
+	{ RFIOS_NPU01_S_EVT_IO_SPACE_READS,
+	    RFIOS_NPU01_EVT_IO_SPACE_READS },
+	{ RFIOS_NPU01_S_EVT_TOTAL_MSI,
+	    RFIOS_NPU01_EVT_TOTAL_MSI },
+	{ RFIOS_NPU01_S_EVT_ATU_MSI,
+	    RFIOS_NPU01_EVT_ATU_MSI },
+	{ RFIOS_NPU01_S_EVT_IMU_MSI,
+	    RFIOS_NPU01_EVT_IMU_MSI },
+	{ RFIOS_NPU01_S_EVT_NPU_MSI,
+	    RFIOS_NPU01_EVT_NPU_MSI },
+	{ RFIOS_NPU01_S_EVT_RETIRED_TAGS_CTO,
+	    RFIOS_NPU01_EVT_RETIRED_TAGS_CTO },
+	{ RFIOS_NPU01_S_EVT_NO_POSTED_TAGS_CYCYLES,
+	    RFIOS_NPU01_EVT_NO_POSTED_TAGS_CYCYLES },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_NPU_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_pex_ctr_0_evts[] = {
+	{ RFIOS_PEX01_S_EVT_NONE,
+	    RFIOS_PEX01_EVT_NONE },
+	{ RFIOS_PEX01_S_EVT_CLK,
+	    RFIOS_PEX01_EVT_CLK },
+	{ RFIOS_PEX01_S_EVT_PEU0_DMA_WR_REC,
+	    RFIOS_PEX01_EVT_PEU0_DMA_WR_REC },
+	{ RFIOS_PEX01_S_EVT_PEU0_PIO_RD_REC,
+	    RFIOS_PEX01_EVT_PEU0_PIO_RD_REC },
+	{ RFIOS_PEX01_S_EVT_PEU0_DMA_RD_SENT,
+	    RFIOS_PEX01_EVT_PEU0_DMA_RD_SENT },
+	{ RFIOS_PEX01_S_EVT_PEU0_TLP_REC,
+	    RFIOS_PEX01_EVT_PEU0_TLP_REC },
+	{ RFIOS_PEX01_S_EVT_PEU0_TRP_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU0_TRP_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU0_TCH_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU0_TCH_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU0_TCD_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU0_TCD_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_NON_POSTED_PIOS_LATENCY,
+	    RFIOS_PEX01_EVT_NON_POSTED_PIOS_LATENCY },
+	{ RFIOS_PEX01_S_EVT_NON_POSTED_PIOS_NUM,
+	    RFIOS_PEX01_EVT_NON_POSTED_PIOS_NUM },
+	{ RFIOS_PEX01_S_EVT_PEX_CFG_WRITE,
+	    RFIOS_PEX01_EVT_PEX_CFG_WRITE },
+	{ RFIOS_PEX01_S_EVT_PEX_CFG_READ,
+	    RFIOS_PEX01_EVT_PEX_CFG_READ },
+	{ RFIOS_PEX01_S_EVT_PEX_MEM_WRITE,
+	    RFIOS_PEX01_EVT_PEX_MEM_WRITE },
+	{ RFIOS_PEX01_S_EVT_PEX_MEM_READ,
+	    RFIOS_PEX01_EVT_PEX_MEM_READ },
+	{ RFIOS_PEX01_S_EVT_PEU1_DMA_WR_REC,
+	    RFIOS_PEX01_EVT_PEU1_DMA_WR_REC },
+	{ RFIOS_PEX01_S_EVT_PEU1_PIO_RD_REC,
+	    RFIOS_PEX01_EVT_PEU1_PIO_RD_REC },
+	{ RFIOS_PEX01_S_EVT_PEU1_DMA_RD_SENT,
+	    RFIOS_PEX01_EVT_PEU1_DMA_RD_SENT },
+	{ RFIOS_PEX01_S_EVT_PEU1_TLP_REC,
+	    RFIOS_PEX01_EVT_PEU1_TLP_REC },
+	{ RFIOS_PEX01_S_EVT_PEU1_TRP_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU1_TRP_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU1_TCH_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU1_TCH_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU1_TCD_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU1_TCD_FULL_CYCLES },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_PEX_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_pex_ctr_1_evts[] = {
+	{ RFIOS_PEX01_S_EVT_NONE,
+	    RFIOS_PEX01_EVT_NONE },
+	{ RFIOS_PEX01_S_EVT_CLK,
+	    RFIOS_PEX01_EVT_CLK },
+	{ RFIOS_PEX01_S_EVT_PEU0_DMA_WR_REC,
+	    RFIOS_PEX01_EVT_PEU0_DMA_WR_REC },
+	{ RFIOS_PEX01_S_EVT_PEU0_PIO_RD_REC,
+	    RFIOS_PEX01_EVT_PEU0_PIO_RD_REC },
+	{ RFIOS_PEX01_S_EVT_PEU0_DMA_RD_SENT,
+	    RFIOS_PEX01_EVT_PEU0_DMA_RD_SENT },
+	{ RFIOS_PEX01_S_EVT_PEU0_TLP_REC,
+	    RFIOS_PEX01_EVT_PEU0_TLP_REC },
+	{ RFIOS_PEX01_S_EVT_PEU0_TRP_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU0_TRP_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU0_TCH_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU0_TCH_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU0_TCD_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU0_TCD_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_NON_POSTED_PIOS_LATENCY,
+	    RFIOS_PEX01_EVT_NON_POSTED_PIOS_LATENCY },
+	{ RFIOS_PEX01_S_EVT_NON_POSTED_PIOS_NUM,
+	    RFIOS_PEX01_EVT_NON_POSTED_PIOS_NUM },
+	{ RFIOS_PEX01_S_EVT_PEX_CFG_WRITE,
+	    RFIOS_PEX01_EVT_PEX_CFG_WRITE },
+	{ RFIOS_PEX01_S_EVT_PEX_CFG_READ,
+	    RFIOS_PEX01_EVT_PEX_CFG_READ },
+	{ RFIOS_PEX01_S_EVT_PEX_MEM_WRITE,
+	    RFIOS_PEX01_EVT_PEX_MEM_WRITE },
+	{ RFIOS_PEX01_S_EVT_PEX_MEM_READ,
+	    RFIOS_PEX01_EVT_PEX_MEM_READ },
+	{ RFIOS_PEX01_S_EVT_PEU1_DMA_WR_REC,
+	    RFIOS_PEX01_EVT_PEU1_DMA_WR_REC },
+	{ RFIOS_PEX01_S_EVT_PEU1_PIO_RD_REC,
+	    RFIOS_PEX01_EVT_PEU1_PIO_RD_REC },
+	{ RFIOS_PEX01_S_EVT_PEU1_DMA_RD_SENT,
+	    RFIOS_PEX01_EVT_PEU1_DMA_RD_SENT },
+	{ RFIOS_PEX01_S_EVT_PEU1_TLP_REC,
+	    RFIOS_PEX01_EVT_PEU1_TLP_REC },
+	{ RFIOS_PEX01_S_EVT_PEU1_TRP_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU1_TRP_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU1_TCH_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU1_TCH_FULL_CYCLES },
+	{ RFIOS_PEX01_S_EVT_PEU1_TCD_FULL_CYCLES,
+	    RFIOS_PEX01_EVT_PEU1_TCD_FULL_CYCLES },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_PEX_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_peu_ctr_0_evts[] = {
+	{ RFIOS_PEU01_S_EVT_NONE,
+	    RFIOS_PEU01_EVT_NONE },
+	{ RFIOS_PEU01_S_EVT_CLK,
+	    RFIOS_PEU01_EVT_CLK },
+	{ RFIOS_PEU01_S_EVT_INT_CFG_WR_RECD,
+	    RFIOS_PEU01_EVT_INT_CFG_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_INT_CFG_RD_RECD,
+	    RFIOS_PEU01_EVT_INT_CFG_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_INT_MEM_WR_RECD,
+	    RFIOS_PEU01_EVT_INT_MEM_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_INT_MEM_RD_RECD,
+	    RFIOS_PEU01_EVT_INT_MEM_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_CFG_WR_RECD,
+	    RFIOS_PEU01_EVT_EXT_CFG_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_CFG_RD_RECD,
+	    RFIOS_PEU01_EVT_EXT_CFG_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_MEM_WR_RECD,
+	    RFIOS_PEU01_EVT_EXT_MEM_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_MEM_RD_RECD,
+	    RFIOS_PEU01_EVT_EXT_MEM_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_ALL,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_ALL },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_1_15DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_1_15DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_16_31DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_16_31DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_32_63DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_32_63DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_64_127DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_64_127DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_128_255DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_128_255DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_256_511DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_256_511DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_512_1024DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_512_1024DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_ALL,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_ALL },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_1_15DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_1_15DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_16_31DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_16_31DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_32_63DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_32_63DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_64_127DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_64_127DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_128_255DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_128_255DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_256_511DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_256_511DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_512_1024DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_512_1024DW },
+	{ RFIOS_PEU01_S_EVT_XMIT_POSTED_HDR_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_POSTED_HDR_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_POSTED_DATA_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_POSTED_DATA_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_NON_POSTED_HDR_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_NON_POSTED_HDR_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_NON_POSTED_DATA_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_NON_POSTED_DATA_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_COMPL_HDR_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_COMPL_HDR_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_COMPL_DATA_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_COMPL_DATA_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_NO_XMIT_CRED_CYC,
+	    RFIOS_PEU01_EVT_NO_XMIT_CRED_CYC },
+	{ RFIOS_PEU01_S_EVT_RETRY_BUFF_NA_CYC,
+	    RFIOS_PEU01_EVT_RETRY_BUFF_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_COMP_EXST_CYC,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_COMP_EXST_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_NPOST_EXST_CYC,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_NPOST_EXST_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_DAT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_PST_DAT_EXST },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_DT_CDT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_PST_DT_CDT_EXST },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_CDT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_PST_CDT_EXST },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_CDT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_CDT_EXST },
+	{ RFIOS_PEU01_S_EVT_DLLP_CRC_ERRORS,
+	    RFIOS_PEU01_EVT_DLLP_CRC_ERRORS },
+	{ RFIOS_PEU01_S_EVT_TLP_CRC_ERRORS,
+	    RFIOS_PEU01_EVT_TLP_CRC_ERRORS },
+	{ RFIOS_PEU01_S_EVT_TLP_RECD_WITH_EDB,
+	    RFIOS_PEU01_EVT_TLP_RECD_WITH_EDB },
+	{ RFIOS_PEU01_S_EVT_RECD_FC_TIMEOUT_ERROR,
+	    RFIOS_PEU01_EVT_RECD_FC_TIMEOUT_ERROR },
+	{ RFIOS_PEU01_S_EVT_REPLAY_NUM_ROLLOVERS,
+	    RFIOS_PEU01_EVT_REPLAY_NUM_ROLLOVERS },
+	{ RFIOS_PEU01_S_EVT_REPLAY_TIMER_TIMEOUTS,
+	    RFIOS_PEU01_EVT_REPLAY_TIMER_TIMEOUTS },
+	{ RFIOS_PEU01_S_EVT_REPLAYS_INITIATED,
+	    RFIOS_PEU01_EVT_REPLAYS_INITIATED },
+	{ RFIOS_PEU01_S_EVT_LTSSM_RECOVERY_CYC,
+	    RFIOS_PEU01_EVT_LTSSM_RECOVERY_CYC },
+	{ RFIOS_PEU01_S_EVT_ENTRIES_LTSSM_RECOVERY,
+	    RFIOS_PEU01_EVT_ENTRIES_LTSSM_RECOVERY },
+	{ RFIOS_PEU01_S_EVT_REC_L0S_STATE_CYC,
+	    RFIOS_PEU01_EVT_REC_L0S_STATE_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_L0S_STATE_TRANS,
+	    RFIOS_PEU01_EVT_REC_L0S_STATE_TRANS },
+	{ RFIOS_PEU01_S_EVT_XMIT_L0S_STATE_CYC,
+	    RFIOS_PEU01_EVT_XMIT_L0S_STATE_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_L0S_STATE_TRANS,
+	    RFIOS_PEU01_EVT_XMIT_L0S_STATE_TRANS },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_PEU_CTR_EVT_MASK }
+};
+
+static iospc_event_t rfios_peu_ctr_1_evts[] = {
+	{ RFIOS_PEU01_S_EVT_NONE,
+	    RFIOS_PEU01_EVT_NONE },
+	{ RFIOS_PEU01_S_EVT_CLK,
+	    RFIOS_PEU01_EVT_CLK },
+	{ RFIOS_PEU01_S_EVT_INT_CFG_WR_RECD,
+	    RFIOS_PEU01_EVT_INT_CFG_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_INT_CFG_RD_RECD,
+	    RFIOS_PEU01_EVT_INT_CFG_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_INT_MEM_WR_RECD,
+	    RFIOS_PEU01_EVT_INT_MEM_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_INT_MEM_RD_RECD,
+	    RFIOS_PEU01_EVT_INT_MEM_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_CFG_WR_RECD,
+	    RFIOS_PEU01_EVT_EXT_CFG_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_CFG_RD_RECD,
+	    RFIOS_PEU01_EVT_EXT_CFG_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_MEM_WR_RECD,
+	    RFIOS_PEU01_EVT_EXT_MEM_WR_RECD },
+	{ RFIOS_PEU01_S_EVT_EXT_MEM_RD_RECD,
+	    RFIOS_PEU01_EVT_EXT_MEM_RD_RECD },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_ALL,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_ALL },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_1_15DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_1_15DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_16_31DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_16_31DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_32_63DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_32_63DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_64_127DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_64_127DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_128_255DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_128_255DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_256_511DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_256_511DW },
+	{ RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_512_1024DW,
+	    RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_512_1024DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_ALL,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_ALL },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_1_15DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_1_15DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_16_31DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_16_31DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_32_63DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_32_63DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_64_127DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_64_127DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_128_255DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_128_255DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_256_511DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_256_511DW },
+	{ RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_512_1024DW,
+	    RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_512_1024DW },
+	{ RFIOS_PEU01_S_EVT_XMIT_POSTED_HDR_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_POSTED_HDR_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_POSTED_DATA_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_POSTED_DATA_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_NON_POSTED_HDR_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_NON_POSTED_HDR_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_NON_POSTED_DATA_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_NON_POSTED_DATA_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_COMPL_HDR_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_COMPL_HDR_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_COMPL_DATA_NA_CYC,
+	    RFIOS_PEU01_EVT_XMIT_COMPL_DATA_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_NO_XMIT_CRED_CYC,
+	    RFIOS_PEU01_EVT_NO_XMIT_CRED_CYC },
+	{ RFIOS_PEU01_S_EVT_RETRY_BUFF_NA_CYC,
+	    RFIOS_PEU01_EVT_RETRY_BUFF_NA_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_COMP_EXST_CYC,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_COMP_EXST_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_NPOST_EXST_CYC,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_NPOST_EXST_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_DAT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_PST_DAT_EXST },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_DT_CDT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_PST_DT_CDT_EXST },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_CDT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_PST_CDT_EXST },
+	{ RFIOS_PEU01_S_EVT_REC_FLCTRL_CDT_EXST,
+	    RFIOS_PEU01_EVT_REC_FLCTRL_CDT_EXST },
+	{ RFIOS_PEU01_S_EVT_DLLP_CRC_ERRORS,
+	    RFIOS_PEU01_EVT_DLLP_CRC_ERRORS },
+	{ RFIOS_PEU01_S_EVT_TLP_CRC_ERRORS,
+	    RFIOS_PEU01_EVT_TLP_CRC_ERRORS },
+	{ RFIOS_PEU01_S_EVT_TLP_RECD_WITH_EDB,
+	    RFIOS_PEU01_EVT_TLP_RECD_WITH_EDB },
+	{ RFIOS_PEU01_S_EVT_RECD_FC_TIMEOUT_ERROR,
+	    RFIOS_PEU01_EVT_RECD_FC_TIMEOUT_ERROR },
+	{ RFIOS_PEU01_S_EVT_REPLAY_NUM_ROLLOVERS,
+	    RFIOS_PEU01_EVT_REPLAY_NUM_ROLLOVERS },
+	{ RFIOS_PEU01_S_EVT_REPLAY_TIMER_TIMEOUTS,
+	    RFIOS_PEU01_EVT_REPLAY_TIMER_TIMEOUTS },
+	{ RFIOS_PEU01_S_EVT_REPLAYS_INITIATED,
+	    RFIOS_PEU01_EVT_REPLAYS_INITIATED },
+	{ RFIOS_PEU01_S_EVT_LTSSM_RECOVERY_CYC,
+	    RFIOS_PEU01_EVT_LTSSM_RECOVERY_CYC },
+	{ RFIOS_PEU01_S_EVT_ENTRIES_LTSSM_RECOVERY,
+	    RFIOS_PEU01_EVT_ENTRIES_LTSSM_RECOVERY },
+	{ RFIOS_PEU01_S_EVT_REC_L0S_STATE_CYC,
+	    RFIOS_PEU01_EVT_REC_L0S_STATE_CYC },
+	{ RFIOS_PEU01_S_EVT_REC_L0S_STATE_TRANS,
+	    RFIOS_PEU01_EVT_REC_L0S_STATE_TRANS },
+	{ RFIOS_PEU01_S_EVT_XMIT_L0S_STATE_CYC,
+	    RFIOS_PEU01_EVT_XMIT_L0S_STATE_CYC },
+	{ RFIOS_PEU01_S_EVT_XMIT_L0S_STATE_TRANS,
+	    RFIOS_PEU01_EVT_XMIT_L0S_STATE_TRANS },
+	{ COMMON_S_CLEAR_PIC,
+	    RFIOS_PEU_CTR_EVT_MASK }
+};
+
+
+static iospc_regsel_fld_t rfios_imu_regsel_flds[] = {
+	{ rfios_imu_ctr_0_evts, NUM_EVTS(rfios_imu_ctr_0_evts),
+	    RFIOS_IMU_CTR_EVT_MASK, RFIOS_IMU_CTR_0_EVT_OFF },
+	{ rfios_imu_ctr_1_evts, NUM_EVTS(rfios_imu_ctr_1_evts),
+	    RFIOS_IMU_CTR_EVT_MASK, RFIOS_IMU_CTR_1_EVT_OFF }
+};
+
+static iospc_regsel_fld_t rfios_atu_regsel_flds[] = {
+	{ rfios_atu_ctr_0_evts, NUM_EVTS(rfios_atu_ctr_0_evts),
+	    RFIOS_ATU_CTR_EVT_MASK, RFIOS_ATU_CTR_0_EVT_OFF },
+	{ rfios_atu_ctr_1_evts, NUM_EVTS(rfios_atu_ctr_1_evts),
+	    RFIOS_ATU_CTR_EVT_MASK, RFIOS_ATU_CTR_1_EVT_OFF }
+};
+
+static iospc_regsel_fld_t rfios_npu_regsel_flds[] = {
+	{ rfios_npu_ctr_0_evts, NUM_EVTS(rfios_npu_ctr_0_evts),
+	    RFIOS_NPU_CTR_EVT_MASK, RFIOS_NPU_CTR_0_EVT_OFF },
+	{ rfios_npu_ctr_1_evts, NUM_EVTS(rfios_npu_ctr_1_evts),
+	    RFIOS_NPU_CTR_EVT_MASK, RFIOS_NPU_CTR_1_EVT_OFF }
+};
+
+static iospc_regsel_fld_t rfios_pex_regsel_flds[] = {
+	{ rfios_pex_ctr_0_evts, NUM_EVTS(rfios_pex_ctr_0_evts),
+	    RFIOS_PEX_CTR_EVT_MASK, RFIOS_PEX_CTR_0_EVT_OFF },
+	{ rfios_pex_ctr_1_evts, NUM_EVTS(rfios_pex_ctr_1_evts),
+	    RFIOS_PEX_CTR_EVT_MASK, RFIOS_PEX_CTR_1_EVT_OFF }
+};
+static iospc_regsel_fld_t rfios_peu_regsel_flds[] = {
+	{ rfios_peu_ctr_0_evts, NUM_EVTS(rfios_peu_ctr_0_evts),
+	    RFIOS_PEU_CTR_EVT_MASK, RFIOS_PEU_CTR_0_EVT_OFF },
+	{ rfios_peu_ctr_1_evts, NUM_EVTS(rfios_peu_ctr_1_evts),
+	    RFIOS_PEU_CTR_EVT_MASK, RFIOS_PEU_CTR_1_EVT_OFF }
+};
+
+static iospc_regsel_t rfios_imu_regsel = {
+	HVIO_RFIOS_PERFREG_IMU_SEL,
+	rfios_imu_regsel_flds,
+	NUM_FLDS(rfios_imu_regsel_flds)
+};
+
+static iospc_regsel_t rfios_atu_regsel = {
+	HVIO_RFIOS_PERFREG_ATU_SEL,
+	rfios_atu_regsel_flds,
+	NUM_FLDS(rfios_atu_regsel_flds)
+};
+
+static iospc_regsel_t rfios_npu_regsel = {
+	HVIO_RFIOS_PERFREG_NPU_SEL,
+	rfios_npu_regsel_flds,
+	NUM_FLDS(rfios_npu_regsel_flds)
+};
+
+static iospc_regsel_t rfios_pex_regsel = {
+	HVIO_RFIOS_PERFREG_PEX_SEL,
+	rfios_pex_regsel_flds,
+	NUM_FLDS(rfios_pex_regsel_flds)
+};
+
+static iospc_regsel_t rfios_peu0_regsel = {
+	HVIO_RFIOS_PERFREG_PEU0_SEL,
+	rfios_peu_regsel_flds,
+	NUM_FLDS(rfios_peu_regsel_flds)
+};
+
+static iospc_regsel_t rfios_peu1_regsel = {
+	HVIO_RFIOS_PERFREG_PEU1_SEL,
+	rfios_peu_regsel_flds,
+	NUM_FLDS(rfios_peu_regsel_flds)
+};
+
+/* reg off, reg size, field mask */
+static iospc_cntr_t rfios_imu_cntrs[] = {
+	{ HVIO_RFIOS_PERFREG_IMU_CNT0, FULL64BIT,
+		HVIO_RFIOS_PERFREG_IMU_CNT0, 0ULL},
+	{ HVIO_RFIOS_PERFREG_IMU_CNT1, FULL64BIT,
+		HVIO_RFIOS_PERFREG_IMU_CNT1, 0ULL}
+};
+
+static iospc_cntr_t rfios_atu_cntrs[] = {
+	{ HVIO_RFIOS_PERFREG_ATU_CNT0, FULL64BIT,
+		HVIO_RFIOS_PERFREG_ATU_CNT0, 0ULL},
+	{ HVIO_RFIOS_PERFREG_ATU_CNT1, FULL64BIT,
+		HVIO_RFIOS_PERFREG_ATU_CNT1, 0ULL}
+};
+
+static iospc_cntr_t rfios_npu_cntrs[] = {
+	{ HVIO_RFIOS_PERFREG_NPU_CNT0, FULL64BIT,
+		HVIO_RFIOS_PERFREG_NPU_CNT0, 0ULL},
+	{ HVIO_RFIOS_PERFREG_NPU_CNT1, FULL64BIT,
+		HVIO_RFIOS_PERFREG_NPU_CNT1, 0ULL}
+};
+
+static iospc_cntr_t rfios_pex_cntrs[] = {
+	{ HVIO_RFIOS_PERFREG_PEX_CNT0, FULL64BIT,
+		HVIO_RFIOS_PERFREG_PEX_CNT0, 0ULL},
+	{ HVIO_RFIOS_PERFREG_PEX_CNT1, FULL64BIT,
+		HVIO_RFIOS_PERFREG_PEX_CNT1, 0ULL}
+};
+
+static iospc_cntr_t rfios_peu0_cntrs[] = {
+	{ HVIO_RFIOS_PERFREG_PEU0_CNT0, FULL64BIT,
+		HVIO_RFIOS_PERFREG_PEU0_CNT0, 0ULL},
+	{ HVIO_RFIOS_PERFREG_PEU0_CNT1, FULL64BIT,
+		HVIO_RFIOS_PERFREG_PEU0_CNT1, 0ULL}
+};
+
+static iospc_cntr_t rfios_peu1_cntrs[] = {
+	{ HVIO_RFIOS_PERFREG_PEU1_CNT0, FULL64BIT,
+		HVIO_RFIOS_PERFREG_PEU1_CNT0, 0ULL},	/* was PEU0_CNT0 */
+	{ HVIO_RFIOS_PERFREG_PEU1_CNT1, FULL64BIT,
+		HVIO_RFIOS_PERFREG_PEU1_CNT1, 0ULL}	/* was PEU0_CNT1 */
+};
+
+static iospc_grp_t rfios_imu_grp = {
+	"imu",
+	&rfios_imu_regsel,
+	rfios_imu_cntrs,
+	NUM_CTRS(rfios_imu_cntrs),
+	rfios_access_init,
+	rfios_access_hv,
+	rfios_access_fini,
+	NULL		/* Name kstats pointer, filled in at runtime. */
+};
+
+static iospc_grp_t rfios_atu_grp = {
+	"atu",
+	&rfios_atu_regsel,
+	rfios_atu_cntrs,
+	NUM_CTRS(rfios_atu_cntrs),
+	rfios_access_init,
+	rfios_access_hv,
+	rfios_access_fini,
+	NULL		/* Name kstats pointer, filled in at runtime. */
+};
+
+static iospc_grp_t rfios_npu_grp = {
+	"npu",
+	&rfios_npu_regsel,
+	rfios_npu_cntrs,
+	NUM_CTRS(rfios_npu_cntrs),
+	rfios_access_init,
+	rfios_access_hv,
+	rfios_access_fini,
+	NULL		/* Name kstats pointer, filled in at runtime. */
+};
+
+static iospc_grp_t rfios_pex_grp = {
+	"pex",
+	&rfios_pex_regsel,
+	rfios_pex_cntrs,
+	NUM_CTRS(rfios_pex_cntrs),
+	rfios_access_init,
+	rfios_access_hv,
+	rfios_access_fini,
+	NULL		/* Name kstats pointer, filled in at runtime. */
+};
+
+static iospc_grp_t rfios_peuzero_grp = {
+	"peuzero",
+	&rfios_peu0_regsel,
+	rfios_peu0_cntrs,
+	NUM_CTRS(rfios_peu0_cntrs),
+	rfios_access_init,
+	rfios_access_hv,
+	rfios_access_fini,
+	NULL		/* Name kstats pointer, filled in at runtime. */
+};
+static iospc_grp_t rfios_peuone_grp = {
+	"peuone",
+	&rfios_peu1_regsel,
+	rfios_peu1_cntrs,
+	NUM_CTRS(rfios_peu1_cntrs),
+	rfios_access_init,
+	rfios_access_hv,
+	rfios_access_fini,
+	NULL		/* Name kstats pointer, filled in at runtime. */
+};
+
+iospc_grp_t *rfios_leaf_grps[] = {
+	&rfios_imu_grp,
+	&rfios_atu_grp,
+	&rfios_npu_grp,
+	&rfios_pex_grp,
+	&rfios_peuzero_grp,
+	&rfios_peuone_grp,
+	NULL
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/io/iospc/rfios_tables.h	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,460 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_RFIOSPC_TABLES_H
+#define	_RFIOSPC_TABLES_H
+
+/*
+ * Table definitions for the RF IOS performance counters.
+ *
+ * Each table consists of one or more groups of counters.
+ *
+ * A counter group will have a name (used by busstat as the kstat "module"
+ * name), have its own set of kstats, and a common event select register.
+ * A group is represented as an iospc_grp_t.
+ *
+ * Each counter is represented by an iospc_cntr_t.  Each has its own register
+ * offset (or address), bits for the data it represents, plus an associated
+ * register for zeroing it.
+ *
+ * All registers for iospc are 64 bit, but a size field can be entered into this
+ * structure if registers sizes vary for other implementations (as if this code
+ * is leveraged for a future driver).
+ *
+ * A select register is represented by an iospc_regsel_t.  This defines the
+ * offset or address, and an array of fields which define the events for each
+ * counter it services.  All counters need to have an entry in the fields array
+ * even if they don't have any representation in a select register.  Please see
+ * the explanation of the events array (below) for more information.  Counters
+ * without representation in a select register can specify their (non-existent)
+ * select register field with mask NONPROG_DUMMY_MASK and offset
+ * NONPROG_DUMMY_OFF.
+ *
+ * This implementation supports only one select register per group.  If more
+ * are needed (e.g. if this implementation is used as a template for another
+ * device which has multiple select registers per group) the data structures can
+ * easily be changed to support an array of them.   Add an array index in the
+ * counter structure to associate that counter with a particular select
+ * register, and add a field for the number of select registers in the group
+ * structure.
+ *
+ * Each counter has an array of programmable events associated with it, even if
+ * it is not programmable.  This array is a series of name/value pairs defined
+ * by iospc_event_t.  The value is the event value loaded into the select
+ * register to select that event for that counter.  The last entry in the array
+ * is always an entry with a bitmask of LSB-aligned bits of that counter's
+ * select register's field's width;  it is usually called the CLEAR_PIC entry.
+ * CLEAR_PIC entries are not shown to the user.
+ *
+ * Note that counters without programmable events still need to define a
+ * (small) events array with at least CLEAR_PIC and a single event, so that
+ * event's name can display in busstat output.  The CLEAR_PIC entry of
+ * nonprogrammable counters can have a value of NONPROG_DUMMY_MASK.
+ */
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/kstat.h>
+
+/* RF IOS specific definitions. */
+
+/*
+ * Event bitmask definitions for all groups.
+ */
+#define	RFIOS_IMU_CTR_EVT_MASK	0xffull
+#define	RFIOS_IMU_CTR_0_EVT_OFF	0
+#define	RFIOS_IMU_CTR_1_EVT_OFF	8
+
+#define	RFIOS_ATU_CTR_EVT_MASK	0xffull
+#define	RFIOS_ATU_CTR_0_EVT_OFF	0
+#define	RFIOS_ATU_CTR_1_EVT_OFF	8
+
+#define	RFIOS_NPU_CTR_EVT_MASK	0xffull
+#define	RFIOS_NPU_CTR_0_EVT_OFF	0
+#define	RFIOS_NPU_CTR_1_EVT_OFF	8
+
+#define	RFIOS_PEX_CTR_EVT_MASK	0xffull
+#define	RFIOS_PEX_CTR_0_EVT_OFF	0
+#define	RFIOS_PEX_CTR_1_EVT_OFF	8
+
+#define	RFIOS_PEU_CTR_EVT_MASK	0x7full
+#define	RFIOS_PEU_CTR_0_EVT_OFF	0
+#define	RFIOS_PEU_CTR_1_EVT_OFF	32
+
+/*
+ * Definitions of the different types of events.
+ *
+ * The first part says which registers these events are for.
+ * For example, IMU01 means the IMU performance counters 0 and 1
+ */
+
+/* String sought by busstat to locate the event field width "event" entry. */
+#define	COMMON_S_CLEAR_PIC			"clear_pic"
+
+#define	RFIOS_IMU01_S_EVT_NONE			"event_none"
+#define	RFIOS_IMU01_S_EVT_CLK			"clock_cyc"
+#define	RFIOS_IMU01_S_EVT_TOTAL_MSIX		"total_msix"
+#define	RFIOS_IMU01_S_EVT_IOS_MSI		"ios_msi"
+#define	RFIOS_IMU01_S_EVT_PCIE_MSIX		"pcie_msix"
+#define	RFIOS_IMU01_S_EVT_PCIE_MSGS		"pcie_msgs"
+#define	RFIOS_IMU01_S_EVT_FILTERED_MSIX		"filtered_msix"
+#define	RFIOS_IMU01_S_EVT_EQ_WR			"eq_write"
+#define	RFIOS_IMU01_S_EVT_MONDOS		"mondos"
+
+#define	RFIOS_IMU01_EVT_NONE			0x0
+#define	RFIOS_IMU01_EVT_CLK			0x1
+#define	RFIOS_IMU01_EVT_TOTAL_MSIX		0x2
+#define	RFIOS_IMU01_EVT_IOS_MSI			0x3
+#define	RFIOS_IMU01_EVT_PCIE_MSIX		0x4
+#define	RFIOS_IMU01_EVT_PCIE_MSGS		0x5
+#define	RFIOS_IMU01_EVT_FILTERED_MSIX		0x6
+#define	RFIOS_IMU01_EVT_EQ_WR			0x7
+#define	RFIOS_IMU01_EVT_MONDOS			0x8
+
+#define	RFIOS_ATU01_S_EVT_NONE			"event_none"
+#define	RFIOS_ATU01_S_EVT_CLK			"clock_cyc"
+#define	RFIOS_ATU01_S_EVT_FLOW_CTRL_STALL	"flow_ctrl_cyc"
+#define	RFIOS_ATU01_S_EVT_CLUMP_ACC		"clump_accesses"
+#define	RFIOS_ATU01_S_EVT_CLUMP_MISS		"clump_misses"
+#define	RFIOS_ATU01_S_EVT_CLUMP_RESETS		"clump_resets"
+#define	RFIOS_ATU01_S_EVT_CLUMP_TBL_WALK	"clump_table_walk"
+#define	RFIOS_ATU01_S_EVT_VIRT_ACC		"virt_accesses"
+#define	RFIOS_ATU01_S_EVT_VIRT_MISS		"virt_misses"
+#define	RFIOS_ATU01_S_EVT_VIRT_RESETS		"virt_resets"
+#define	RFIOS_ATU01_S_EVT_VIRT_TBL_WALK		"virt_table_walk"
+#define	RFIOS_ATU01_S_EVT_REAL_ACC		"real_accesses"
+#define	RFIOS_ATU01_S_EVT_REAL_MISS		"real_misses"
+#define	RFIOS_ATU01_S_EVT_REAL_RESETS		"real_resets"
+#define	RFIOS_ATU01_S_EVT_REAL_TBL_WALK		"real_table_walk"
+#define	RFIOS_ATU01_S_EVT_CMD_ERRORS		"cmd_errors"
+#define	RFIOS_ATU01_S_EVT_VIRT_TRANS		"virt_trans"
+#define	RFIOS_ATU01_S_EVT_REAL_TRANS		"real_trans"
+#define	RFIOS_ATU01_S_EVT_PHYS_TRANS		"phys_trans"
+#define	RFIOS_ATU01_S_EVT_STRICT_ORDER_FORCED	"str_order_forced"
+#define	RFIOS_ATU01_S_EVT_RELAX_ORDER_FORCED	"relax_order_forced"
+#define	RFIOS_ATU01_S_EVT_RELAX_ORDER_TLP	"relax_order_tlp"
+#define	RFIOS_ATU01_S_EVT_RELAX_ORDER_TOTAL	"relax_order_total"
+
+#define	RFIOS_ATU01_EVT_NONE			0x0
+#define	RFIOS_ATU01_EVT_CLK			0x1
+#define	RFIOS_ATU01_EVT_FLOW_CTRL_STALL		0x3
+#define	RFIOS_ATU01_EVT_CLUMP_ACC		0x4
+#define	RFIOS_ATU01_EVT_CLUMP_MISS		0x5
+#define	RFIOS_ATU01_EVT_CLUMP_RESETS		0x6
+#define	RFIOS_ATU01_EVT_CLUMP_TBL_WALK		0x7
+#define	RFIOS_ATU01_EVT_VIRT_ACC		0x8
+#define	RFIOS_ATU01_EVT_VIRT_MISS		0x9
+#define	RFIOS_ATU01_EVT_VIRT_RESETS		0xa
+#define	RFIOS_ATU01_EVT_VIRT_TBL_WALK		0xb
+#define	RFIOS_ATU01_EVT_REAL_ACC		0xc
+#define	RFIOS_ATU01_EVT_REAL_MISS		0xd
+#define	RFIOS_ATU01_EVT_REAL_RESETS		0xe
+#define	RFIOS_ATU01_EVT_REAL_TBL_WALK		0xf
+#define	RFIOS_ATU01_EVT_CMD_ERRORS		0x10
+#define	RFIOS_ATU01_EVT_VIRT_TRANS		0x11
+#define	RFIOS_ATU01_EVT_REAL_TRANS		0x12
+#define	RFIOS_ATU01_EVT_PHYS_TRANS		0x13
+#define	RFIOS_ATU01_EVT_STRICT_ORDER_FORCED	0x14
+#define	RFIOS_ATU01_EVT_RELAX_ORDER_FORCED	0x15
+#define	RFIOS_ATU01_EVT_RELAX_ORDER_TLP		0x16
+#define	RFIOS_ATU01_EVT_RELAX_ORDER_TOTAL	0x17
+
+#define	RFIOS_NPU01_S_EVT_NONE			"event_none"
+#define	RFIOS_NPU01_S_EVT_CLK			"clock_cyc"
+#define	RFIOS_NPU01_S_EVT_ZERO_BYTE_READ	"zero_byte_reads"
+#define	RFIOS_NPU01_S_EVT_DMA_WRITE_LATENCY	"write_latency"
+#define	RFIOS_NPU01_S_EVT_DMA_WRITE_LATENCY_NUM	"write_latency_num"
+#define	RFIOS_NPU01_S_EVT_OSB_FULL_CYCLES	"osb_full_cyc"
+#define	RFIOS_NPU01_S_EVT_DMA_READ_LATENCY	"read_latency"
+#define	RFIOS_NPU01_S_EVT_DMA_READ_LATENCY_NUM	"read_latency_num"
+#define	RFIOS_NPU01_S_EVT_PSB_FULL_CYCLES	"psb_full_cyc"
+#define	RFIOS_NPU01_S_EVT_ICB_FULL_CYCLES	"icb_full_cyc"
+#define	RFIOS_NPU01_S_EVT_ECB_FULL_CYCLES	"ecb_full_cyc"
+#define	RFIOS_NPU01_S_EVT_ATU_CSR_CFG_WRITES	"atu_csr_cfg_wrs"
+#define	RFIOS_NPU01_S_EVT_ATU_CSR_CFG_READS	"atu_csr_cfg_rds"
+#define	RFIOS_NPU01_S_EVT_ATU_CSR_MEM_WRITES	"atu_csr_mem_wrs"
+#define	RFIOS_NPU01_S_EVT_ATU_CSR_MEM_READS	"atu_csr_mem_rds"
+#define	RFIOS_NPU01_S_EVT_IMU_CSR_CFG_WRITES	"imu_csr_cfg_wrs"
+#define	RFIOS_NPU01_S_EVT_IMU_CSR_CFG_READS	"imu_csr_cfg_rds"
+#define	RFIOS_NPU01_S_EVT_IMU_CSR_MEM_WRITES	"imu_csr_mem_wrs"
+#define	RFIOS_NPU01_S_EVT_IMU_CSR_MEM_READS	"imu_csr_mem_rds"
+#define	RFIOS_NPU01_S_EVT_NPU_CSR_CFG_WRITES	"npu_csr_cfg_wrs"
+#define	RFIOS_NPU01_S_EVT_NPU_CSR_CFG_READS	"npu_csr_cfg_rds"
+#define	RFIOS_NPU01_S_EVT_NPU_CSR_MEM_WRITES	"npu_csr_mem_wrs"
+#define	RFIOS_NPU01_S_EVT_NPU_CSR_MEM_READS	"npu_csr_mem_rds"
+#define	RFIOS_NPU01_S_EVT_OTHER_CSR_CFG_WRITES	"other_csr_cfg_wrs"
+#define	RFIOS_NPU01_S_EVT_OTHER_CSR_CFG_READS	"other_csr_cfg_rds"
+#define	RFIOS_NPU01_S_EVT_OTHER_CSR_MEM64_WRITES \
+						"other_csr_mem64_wrs"
+#define	RFIOS_NPU01_S_EVT_OTHER_CSR_MEM64_READS	"other_csr_mem64_rds"
+#define	RFIOS_NPU01_S_EVT_OTHER_CSR_MEM32_WRITES \
+						"other_csr_mem32_wrs"
+#define	RFIOS_NPU01_S_EVT_OTHER_CSR_MEM32_READS	"other_csr_mem32_rds"
+#define	RFIOS_NPU01_S_EVT_IO_SPACE_WRITES	"io_space_wrs"
+#define	RFIOS_NPU01_S_EVT_IO_SPACE_READS	"io_space_rds"
+#define	RFIOS_NPU01_S_EVT_TOTAL_MSI		"total_msi"
+#define	RFIOS_NPU01_S_EVT_ATU_MSI		"atu_msi"
+#define	RFIOS_NPU01_S_EVT_IMU_MSI		"imu_msi"
+#define	RFIOS_NPU01_S_EVT_NPU_MSI		"npu_msi"
+#define	RFIOS_NPU01_S_EVT_RETIRED_TAGS_CTO	"retired_tags"
+#define	RFIOS_NPU01_S_EVT_NO_POSTED_TAGS_CYCYLES \
+						"no_posted_tags_cyc"
+
+#define	RFIOS_NPU01_EVT_NONE			0
+#define	RFIOS_NPU01_EVT_CLK			1
+#define	RFIOS_NPU01_EVT_ZERO_BYTE_READ		2
+#define	RFIOS_NPU01_EVT_DMA_WRITE_LATENCY	3
+#define	RFIOS_NPU01_EVT_DMA_WRITE_LATENCY_NUM	4
+#define	RFIOS_NPU01_EVT_OSB_FULL_CYCLES		5
+#define	RFIOS_NPU01_EVT_DMA_READ_LATENCY	8
+#define	RFIOS_NPU01_EVT_DMA_READ_LATENCY_NUM	9
+#define	RFIOS_NPU01_EVT_PSB_FULL_CYCLES		10
+#define	RFIOS_NPU01_EVT_ICB_FULL_CYCLES		16
+#define	RFIOS_NPU01_EVT_ECB_FULL_CYCLES		24
+#define	RFIOS_NPU01_EVT_ATU_CSR_CFG_WRITES	32
+#define	RFIOS_NPU01_EVT_ATU_CSR_CFG_READS	33
+#define	RFIOS_NPU01_EVT_ATU_CSR_MEM_WRITES	34
+#define	RFIOS_NPU01_EVT_ATU_CSR_MEM_READS	35
+#define	RFIOS_NPU01_EVT_IMU_CSR_CFG_WRITES	36
+#define	RFIOS_NPU01_EVT_IMU_CSR_CFG_READS	37
+#define	RFIOS_NPU01_EVT_IMU_CSR_MEM_WRITES	38
+#define	RFIOS_NPU01_EVT_IMU_CSR_MEM_READS	39
+#define	RFIOS_NPU01_EVT_NPU_CSR_CFG_WRITES	40
+#define	RFIOS_NPU01_EVT_NPU_CSR_CFG_READS	41
+#define	RFIOS_NPU01_EVT_NPU_CSR_MEM_WRITES	42
+#define	RFIOS_NPU01_EVT_NPU_CSR_MEM_READS	43
+#define	RFIOS_NPU01_EVT_OTHER_CSR_CFG_WRITES	44
+#define	RFIOS_NPU01_EVT_OTHER_CSR_CFG_READS	45
+#define	RFIOS_NPU01_EVT_OTHER_CSR_MEM64_WRITES	46
+#define	RFIOS_NPU01_EVT_OTHER_CSR_MEM64_READS	47
+#define	RFIOS_NPU01_EVT_OTHER_CSR_MEM32_WRITES	48
+#define	RFIOS_NPU01_EVT_OTHER_CSR_MEM32_READS	49
+#define	RFIOS_NPU01_EVT_IO_SPACE_WRITES		50
+#define	RFIOS_NPU01_EVT_IO_SPACE_READS		51
+#define	RFIOS_NPU01_EVT_TOTAL_MSI		52
+#define	RFIOS_NPU01_EVT_ATU_MSI			53
+#define	RFIOS_NPU01_EVT_IMU_MSI			54
+#define	RFIOS_NPU01_EVT_NPU_MSI			55
+#define	RFIOS_NPU01_EVT_RETIRED_TAGS_CTO	56
+#define	RFIOS_NPU01_EVT_NO_POSTED_TAGS_CYCYLES	57
+
+#define	RFIOS_PEX01_S_EVT_NONE			"event_none"
+#define	RFIOS_PEX01_S_EVT_CLK			"clock_cyc"
+#define	RFIOS_PEX01_S_EVT_PEU0_DMA_WR_REC	"peu0_dma_wr_received"
+#define	RFIOS_PEX01_S_EVT_PEU0_PIO_RD_REC	"peu0_pio_rd_received"
+#define	RFIOS_PEX01_S_EVT_PEU0_DMA_RD_SENT	"peu0_dma_rd_sent"
+#define	RFIOS_PEX01_S_EVT_PEU0_TLP_REC		"peu0_tlp_recieved"
+#define	RFIOS_PEX01_S_EVT_PEU0_TRP_FULL_CYCLES	"peu0_trp_full_cyc"
+#define	RFIOS_PEX01_S_EVT_PEU0_TCH_FULL_CYCLES	"peu0_tch_full_cyc"
+#define	RFIOS_PEX01_S_EVT_PEU0_TCD_FULL_CYCLES	"peu0_tcd_full_cyc"
+#define	RFIOS_PEX01_S_EVT_NON_POSTED_PIOS_LATENCY \
+						"non_posted_pios_latency"
+#define	RFIOS_PEX01_S_EVT_NON_POSTED_PIOS_NUM	"non_posted_pios_num"
+#define	RFIOS_PEX01_S_EVT_PEX_CFG_WRITE		"pex_config_wr"
+#define	RFIOS_PEX01_S_EVT_PEX_CFG_READ		"pex_config_rd"
+#define	RFIOS_PEX01_S_EVT_PEX_MEM_WRITE		"pex_mem_wr"
+#define	RFIOS_PEX01_S_EVT_PEX_MEM_READ		"pex_mem_rd"
+#define	RFIOS_PEX01_S_EVT_PEU1_DMA_WR_REC	"peu1_dma_wr_received"
+#define	RFIOS_PEX01_S_EVT_PEU1_PIO_RD_REC	"peu1_pio_rd_received"
+#define	RFIOS_PEX01_S_EVT_PEU1_DMA_RD_SENT	"peu1_dma_rd_sent"
+#define	RFIOS_PEX01_S_EVT_PEU1_TLP_REC		"peu1_tlp_recieved"
+#define	RFIOS_PEX01_S_EVT_PEU1_TRP_FULL_CYCLES	"peu1_trp_full_cyc"
+#define	RFIOS_PEX01_S_EVT_PEU1_TCH_FULL_CYCLES	"peu1_tch_full_cyc"
+#define	RFIOS_PEX01_S_EVT_PEU1_TCD_FULL_CYCLES	"peu1_tcd_full_cyc"
+
+#define	RFIOS_PEX01_EVT_NONE			0x0
+#define	RFIOS_PEX01_EVT_CLK			0x1
+#define	RFIOS_PEX01_EVT_PEU0_DMA_WR_REC		0x2
+#define	RFIOS_PEX01_EVT_PEU0_PIO_RD_REC		0x3
+#define	RFIOS_PEX01_EVT_PEU0_DMA_RD_SENT	0x4
+#define	RFIOS_PEX01_EVT_PEU0_TLP_REC		0x5
+#define	RFIOS_PEX01_EVT_PEU0_TRP_FULL_CYCLES	0x6
+#define	RFIOS_PEX01_EVT_PEU0_TCH_FULL_CYCLES	0x7
+#define	RFIOS_PEX01_EVT_PEU0_TCD_FULL_CYCLES	0x8
+#define	RFIOS_PEX01_EVT_NON_POSTED_PIOS_LATENCY	0x9
+#define	RFIOS_PEX01_EVT_NON_POSTED_PIOS_NUM	0xa
+#define	RFIOS_PEX01_EVT_PEX_CFG_WRITE		0xb
+#define	RFIOS_PEX01_EVT_PEX_CFG_READ		0xc
+#define	RFIOS_PEX01_EVT_PEX_MEM_WRITE		0xd
+#define	RFIOS_PEX01_EVT_PEX_MEM_READ		0xe
+#define	RFIOS_PEX01_EVT_PEU1_DMA_WR_REC		0x20
+#define	RFIOS_PEX01_EVT_PEU1_PIO_RD_REC		0x30
+#define	RFIOS_PEX01_EVT_PEU1_DMA_RD_SENT	0x40
+#define	RFIOS_PEX01_EVT_PEU1_TLP_REC		0x50
+#define	RFIOS_PEX01_EVT_PEU1_TRP_FULL_CYCLES	0x60
+#define	RFIOS_PEX01_EVT_PEU1_TCH_FULL_CYCLES	0x70
+#define	RFIOS_PEX01_EVT_PEU1_TCD_FULL_CYCLES	0x80
+
+#define	RFIOS_PEU01_S_EVT_NONE			"event_none"
+#define	RFIOS_PEU01_S_EVT_CLK			"clock_cyc"
+#define	RFIOS_PEU01_S_EVT_INT_CFG_WR_RECD	"int_config_wr_recd"
+#define	RFIOS_PEU01_S_EVT_INT_CFG_RD_RECD	"int_config_rd_recd"
+#define	RFIOS_PEU01_S_EVT_INT_MEM_WR_RECD	"int_mem_wr_recd"
+#define	RFIOS_PEU01_S_EVT_INT_MEM_RD_RECD	"int_mem_rd_recd"
+#define	RFIOS_PEU01_S_EVT_EXT_CFG_WR_RECD	"ext_config_wr_recd"
+#define	RFIOS_PEU01_S_EVT_EXT_CFG_RD_RECD	"ext_config_rd_recd"
+#define	RFIOS_PEU01_S_EVT_EXT_MEM_WR_RECD	"ext_mem_wr_recd"
+#define	RFIOS_PEU01_S_EVT_EXT_MEM_RD_RECD	"ext_mem_rd_recd"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_ALL	"mem_rd_recd_all"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_1_15DW \
+						"mem_rd_recd_1_15dw"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_16_31DW \
+						"mem_rd_recd_16_31dw"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_32_63DW \
+						"mem_rd_recd_32_63dw"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_64_127DW \
+						"mem_rd_recd_64_127dw"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_128_255DW \
+						"mem_rd_recd_128_255dw"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_256_511DW \
+						"mem_rd_recd_256_511dw"
+#define	RFIOS_PEU01_S_EVT_MEM_RD_REQ_RECD_512_1024DW \
+						"mem_rd_recd_512_1024dw"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_ALL	"mem_wr_recd_all"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_1_15DW \
+						"mem_wr_recd_1_15dw"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_16_31DW \
+						"mem_wr_recd_16_31dw"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_32_63DW \
+						"mem_wr_recd_32_63dw"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_64_127DW \
+						"mem_wr_recd_64_127dw"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_128_255DW \
+						"mem_wr_recd_128_255dw"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_256_511DW \
+						"mem_wr_recd_256_511dw"
+#define	RFIOS_PEU01_S_EVT_MEM_WR_REQ_RECD_512_1024DW \
+						"mem_wr_recd_512_1024dw"
+#define	RFIOS_PEU01_S_EVT_XMIT_POSTED_HDR_NA_CYC \
+						"xmit_posted_hdr_na_cyc"
+#define	RFIOS_PEU01_S_EVT_XMIT_POSTED_DATA_NA_CYC \
+						"xmit_posted_data_na_cyc"
+#define	RFIOS_PEU01_S_EVT_XMIT_NON_POSTED_HDR_NA_CYC \
+						"xmit_non_posted_hdr_na_cyc"
+#define	RFIOS_PEU01_S_EVT_XMIT_NON_POSTED_DATA_NA_CYC \
+						"xmit_non_posted_data_na_cyc"
+#define	RFIOS_PEU01_S_EVT_XMIT_COMPL_HDR_NA_CYC	"xmit_compl_hdr_na_cyc"
+#define	RFIOS_PEU01_S_EVT_XMIT_COMPL_DATA_NA_CYC \
+						"xmit_compl_data_na_cyc"
+#define	RFIOS_PEU01_S_EVT_NO_XMIT_CRED_CYC	"no_xmit_cred_cyc"
+#define	RFIOS_PEU01_S_EVT_RETRY_BUFF_NA_CYC	"retry_buffer_na_cyc"
+#define	RFIOS_PEU01_S_EVT_REC_FLCTRL_COMP_EXST_CYC \
+						"rec_flw_compl_hdr_exhast_cyc"
+#define	RFIOS_PEU01_S_EVT_REC_FLCTRL_NPOST_EXST_CYC \
+						"rec_flw_npost_hdr_exhast_cyc"
+#define	RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_DAT_EXST \
+						"rec_flw_post_data_exhast_cyc"
+#define	RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_DT_CDT_EXST \
+						"rec_flw_post_data_cred_exh_cyc"
+#define	RFIOS_PEU01_S_EVT_REC_FLCTRL_PST_CDT_EXST \
+						"rec_flw_post_data_exh_cyc"
+#define	RFIOS_PEU01_S_EVT_REC_FLCTRL_CDT_EXST	"rec_flw_cred_exh_cyc"
+#define	RFIOS_PEU01_S_EVT_DLLP_CRC_ERRORS	"dllp_crc_errs"
+#define	RFIOS_PEU01_S_EVT_TLP_CRC_ERRORS	"tlp_crc_errs"
+#define	RFIOS_PEU01_S_EVT_TLP_RECD_WITH_EDB	"tlp_recd_with_edb"
+#define	RFIOS_PEU01_S_EVT_RECD_FC_TIMEOUT_ERROR	"recd_fc_to_errs"
+#define	RFIOS_PEU01_S_EVT_REPLAY_NUM_ROLLOVERS	"replay_num_ro"
+#define	RFIOS_PEU01_S_EVT_REPLAY_TIMER_TIMEOUTS	"replay_timer_to"
+#define	RFIOS_PEU01_S_EVT_REPLAYS_INITIATED	"replays_init"
+#define	RFIOS_PEU01_S_EVT_LTSSM_RECOVERY_CYC	"ltssm_rec_cyc"
+#define	RFIOS_PEU01_S_EVT_ENTRIES_LTSSM_RECOVERY \
+						"entries_ltssm_rec"
+#define	RFIOS_PEU01_S_EVT_REC_L0S_STATE_CYC	"rec_l0s_state_cyc"
+#define	RFIOS_PEU01_S_EVT_REC_L0S_STATE_TRANS	"rec_l0s_state_trans"
+#define	RFIOS_PEU01_S_EVT_XMIT_L0S_STATE_CYC	"xmit_l0s_state_cyc"
+#define	RFIOS_PEU01_S_EVT_XMIT_L0S_STATE_TRANS	"xmit_l0s_state_trans"
+
+
+#define	RFIOS_PEU01_EVT_NONE				0x0
+#define	RFIOS_PEU01_EVT_CLK				0x1
+#define	RFIOS_PEU01_EVT_INT_CFG_WR_RECD			0x2
+#define	RFIOS_PEU01_EVT_INT_CFG_RD_RECD			0x3
+#define	RFIOS_PEU01_EVT_INT_MEM_WR_RECD			0x4
+#define	RFIOS_PEU01_EVT_INT_MEM_RD_RECD			0x5
+#define	RFIOS_PEU01_EVT_EXT_CFG_WR_RECD			0x6
+#define	RFIOS_PEU01_EVT_EXT_CFG_RD_RECD			0x7
+#define	RFIOS_PEU01_EVT_EXT_MEM_WR_RECD			0x8
+#define	RFIOS_PEU01_EVT_EXT_MEM_RD_RECD			0x9
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_ALL		0x10
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_1_15DW		0x11
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_16_31DW		0x12
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_32_63DW		0x13
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_64_127DW	0x14
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_128_255DW	0x15
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_256_511DW	0x16
+#define	RFIOS_PEU01_EVT_MEM_RD_REQ_RECD_512_1024DW	0x17
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_ALL		0x18
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_1_15DW		0x19
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_16_31DW		0x1a
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_32_63DW		0x1b
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_64_127DW	0x1c
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_128_255DW	0x1d
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_256_511DW	0x1e
+#define	RFIOS_PEU01_EVT_MEM_WR_REQ_RECD_512_1024DW	0x1f
+#define	RFIOS_PEU01_EVT_XMIT_POSTED_HDR_NA_CYC		0x20
+#define	RFIOS_PEU01_EVT_XMIT_POSTED_DATA_NA_CYC		0x21
+#define	RFIOS_PEU01_EVT_XMIT_NON_POSTED_HDR_NA_CYC	0x22
+#define	RFIOS_PEU01_EVT_XMIT_NON_POSTED_DATA_NA_CYC	0x23
+#define	RFIOS_PEU01_EVT_XMIT_COMPL_HDR_NA_CYC		0x24
+#define	RFIOS_PEU01_EVT_XMIT_COMPL_DATA_NA_CYC		0x25
+#define	RFIOS_PEU01_EVT_NO_XMIT_CRED_CYC		0x26
+#define	RFIOS_PEU01_EVT_RETRY_BUFF_NA_CYC		0x27
+#define	RFIOS_PEU01_EVT_REC_FLCTRL_COMP_EXST_CYC	0x28
+#define	RFIOS_PEU01_EVT_REC_FLCTRL_NPOST_EXST_CYC	0x29
+#define	RFIOS_PEU01_EVT_REC_FLCTRL_PST_DAT_EXST		0x2a
+#define	RFIOS_PEU01_EVT_REC_FLCTRL_PST_DT_CDT_EXST	0x2b
+#define	RFIOS_PEU01_EVT_REC_FLCTRL_PST_CDT_EXST		0x2c
+#define	RFIOS_PEU01_EVT_REC_FLCTRL_CDT_EXST		0x2d
+#define	RFIOS_PEU01_EVT_DLLP_CRC_ERRORS			0x30
+#define	RFIOS_PEU01_EVT_TLP_CRC_ERRORS			0x31
+#define	RFIOS_PEU01_EVT_TLP_RECD_WITH_EDB		0x32
+#define	RFIOS_PEU01_EVT_RECD_FC_TIMEOUT_ERROR		0x33
+#define	RFIOS_PEU01_EVT_REPLAY_NUM_ROLLOVERS		0x34
+#define	RFIOS_PEU01_EVT_REPLAY_TIMER_TIMEOUTS		0x35
+#define	RFIOS_PEU01_EVT_REPLAYS_INITIATED		0x36
+#define	RFIOS_PEU01_EVT_LTSSM_RECOVERY_CYC		0x37
+#define	RFIOS_PEU01_EVT_ENTRIES_LTSSM_RECOVERY		0x38
+#define	RFIOS_PEU01_EVT_REC_L0S_STATE_CYC		0x40
+#define	RFIOS_PEU01_EVT_REC_L0S_STATE_TRANS		0x41
+#define	RFIOS_PEU01_EVT_XMIT_L0S_STATE_CYC		0x42
+#define	RFIOS_PEU01_EVT_XMIT_L0S_STATE_TRANS		0x43
+
+extern int rfiospc_get_perfreg(cntr_handle_t handle, int regid, uint64_t *data);
+extern int rfiospc_set_perfreg(cntr_handle_t handle, int regid, uint64_t data);
+
+extern int rfios_access_hv(iospc_t *iospc_p, void *arg, int op, int regid,
+    uint64_t *data);
+extern int rfios_access_init(iospc_t *iospc_p, iospc_ksinfo_t *ksinfo_p);
+extern int rfios_access_fini(iospc_t *iospc_p, iospc_ksinfo_t *ksinfo_p);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _RFIOSPC_TABLES_H */
--- a/usr/src/uts/sun4v/io/n2rng/n2rng.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/n2rng/n2rng.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -221,6 +221,12 @@
 		 * Victoria Falls
 		 */
 		n2rng->n_binding = N2RNG_CPU_VF;
+	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_KT,
+	    strlen(N2RNG_BINDNAME_KT)) == 0) {
+		/*
+		 * Rainbow Falls
+		 */
+		n2rng->n_binding = N2RNG_CPU_KT;
 	} else {
 		n2rng_diperror(dip,
 		    "unable to determine n2rng (cpu) binding (%s)",
--- a/usr/src/uts/sun4v/io/niumx/niumx.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/niumx/niumx.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -133,7 +133,6 @@
 };
 
 static void *niumx_state;
-static niumx_ih_t niumx_ihtable[NIUMX_MAX_INTRS];
 
 /*
  * forward function declarations:
@@ -189,9 +188,10 @@
 void
 niumx_intr_dist(void *arg)
 {
-	kmutex_t 	*lock_p = (kmutex_t *)arg;
+	niumx_devstate_t	*niumxds_p = (niumx_devstate_t *)arg;
+	kmutex_t 	*lock_p = &niumxds_p->niumx_mutex;
 	int		i;
-	niumx_ih_t	*ih_p = niumx_ihtable;
+	niumx_ih_t	*ih_p = niumxds_p->niumx_ihtable;
 
 	DBG(DBG_A_INTX, NULL, "niumx_intr_dist entered\n");
 	mutex_enter(lock_p);
@@ -270,7 +270,7 @@
 		    NIUMX_DEVHDLE_MASK);
 
 		/* add interrupt redistribution callback */
-		intr_dist_add(niumx_intr_dist, &niumxds_p->niumx_mutex);
+		intr_dist_add(niumx_intr_dist, niumxds_p);
 
 		niumxds_p->niumx_fm_cap = DDI_FM_EREPORT_CAPABLE;
 
@@ -305,7 +305,7 @@
 		niumxds_p = (niumx_devstate_t *)
 		    ddi_get_soft_state(niumx_state, ddi_get_instance(dip));
 
-		intr_dist_rem(niumx_intr_dist, &niumxds_p->niumx_mutex);
+		intr_dist_rem(niumx_intr_dist, niumxds_p);
 		ddi_fm_fini(dip);
 		mutex_destroy(&niumxds_p->niumx_mutex);
 		ddi_soft_state_free(niumx_state, ddi_get_instance(dip));
@@ -864,6 +864,11 @@
 	devino_t	*inos_p;
 	int		inoslen, ret = DDI_SUCCESS;
 	uint64_t	hvret;
+	niumx_devstate_t	*niumxds_p;	/* devstate pointer */
+	int 		instance = ddi_get_instance(dip);
+
+	niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
+	    instance);
 
 	ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);
 
@@ -873,7 +878,8 @@
 		ret = DDI_FAILURE;
 		goto fail;
 	}
-	ih_p = niumx_ihtable + inos_p[hdlp->ih_inum];
+
+	ih_p = niumxds_p->niumx_ihtable + inos_p[hdlp->ih_inum];
 	DBG(DBG_A_INTX, dip, "niumx_set_intr: rdip=%s%d, valid=%d %s (%x,%x)\n",
 	    NAMEINST(rdip), valid, valid ? "enabling" : "disabling",
 	    ih_p->ih_inum, ih_p->ih_sysino);
@@ -907,6 +913,11 @@
 	uint64_t	hvret;
 	devino_t	*inos_p, ino; /* INO numbers, from "interrupts" prop */
 	sysino_t	sysino;
+	niumx_devstate_t	*niumxds_p;	/* devstate pointer */
+	int		instance = ddi_get_instance(dip);
+
+	niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
+	    instance);
 
 	/* get new ino */
 	if (hdlp->ih_inum >= NIUMX_MAX_INTRS) {
@@ -920,7 +931,7 @@
 		ret = DDI_FAILURE;
 		goto done;
 	}
-	ih_p = niumx_ihtable + inos_p[hdlp->ih_inum];
+	ih_p = niumxds_p->niumx_ihtable + inos_p[hdlp->ih_inum];
 	ino = inos_p[hdlp->ih_inum];
 	kmem_free(inos_p, inoslen);
 	if ((hvret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip), ino,
@@ -994,6 +1005,11 @@
 	int		inoslen, ret = DDI_SUCCESS, state;
 	hrtime_t	start;
 	sysino_t 	sysino;
+	niumx_devstate_t	*niumxds_p;	/* devstate pointer */
+	int		instance = ddi_get_instance(dip);
+
+	niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
+	    instance);
 
 	ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);
 
@@ -1003,7 +1019,7 @@
 		ret = DDI_FAILURE;
 		goto fail1;
 	}
-	ih_p = niumx_ihtable + inos_p[hdlp->ih_inum];
+	ih_p = niumxds_p->niumx_ihtable + inos_p[hdlp->ih_inum];
 	sysino = ih_p->ih_sysino;
 	DBG(DBG_R_INTX, dip, "removing (%x,%x)\n", ih_p->ih_inum, sysino);
 
--- a/usr/src/uts/sun4v/io/niumx/niumx_var.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/niumx/niumx_var.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -72,16 +72,17 @@
 	struct niumx_ih	*ih_next;	/* next in the chain */
 } niumx_ih_t;
 
+#define	NIUMX_MAX_INTRS	64
+
 typedef struct niumx_devstate {
 	dev_info_t *dip;
 	devhandle_t	niumx_dev_hdl;	/* device handle */
 	kmutex_t niumx_mutex;
 	int niumx_fm_cap;
 	ddi_iblock_cookie_t niumx_fm_ibc;
+	niumx_ih_t niumx_ihtable[NIUMX_MAX_INTRS];
 } niumx_devstate_t;
 
-#define	NIUMX_MAX_INTRS	64
-
 /*
  * flags for overloading dmai_inuse field of the dma request structure:
  */
--- a/usr/src/uts/sun4v/io/px/px_err.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/px/px_err.c	Fri Dec 11 10:41:17 2009 -0800
@@ -53,10 +53,14 @@
     px_rc_err_t *epkt);
 static int px_intr_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
     px_rc_err_t *epkt);
+static int px_port_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
+    px_rc_err_t *epkt);
 static int px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
     px_rc_err_t *epkt);
 static int px_intr_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr,
     px_rc_err_t *epkt);
+static int px_port_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr,
+    px_rc_err_t *epkt);
 static void px_fix_legacy_epkt(dev_info_t *dip, ddi_fm_error_t *derr,
     px_rc_err_t *epkt);
 static int px_mmu_handle_lookup(dev_info_t *dip, ddi_fm_error_t *derr,
@@ -320,6 +324,9 @@
 	case BLOCK_INTR:
 		err = px_intr_epkt_severity(dip, derr, epkt);
 		break;
+	case BLOCK_PORT:
+		err = px_port_epkt_severity(dip, derr, epkt);
+		break;
 	case BLOCK_PCIE:
 		is_block_pci = B_TRUE;
 		err = px_pcie_epkt_severity(dip, derr, epkt);
@@ -404,6 +411,11 @@
 		    is_valid_epkt ? pec->ehdl : 0,
 		    EPKT_STICK, DATA_TYPE_UINT64,
 		    is_valid_epkt ? pec->stick : 0,
+		    EPKT_DW0, DATA_TYPE_UINT64, ((uint64_t *)pec)[3],
+		    EPKT_DW1, DATA_TYPE_UINT64, ((uint64_t *)pec)[4],
+		    EPKT_DW2, DATA_TYPE_UINT64, ((uint64_t *)pec)[5],
+		    EPKT_DW3, DATA_TYPE_UINT64, ((uint64_t *)pec)[6],
+		    EPKT_DW4, DATA_TYPE_UINT64, ((uint64_t *)pec)[7],
 		    EPKT_PEC_DESCR, DATA_TYPE_STRING, descr_buf);
 	} else {
 		(void) snprintf(descr_buf, sizeof (descr_buf),
@@ -431,6 +443,11 @@
 		    is_valid_epkt ? epkt->ehdl : 0,
 		    EPKT_STICK, DATA_TYPE_UINT64,
 		    is_valid_epkt ? epkt->stick : 0,
+		    EPKT_DW0, DATA_TYPE_UINT64, ((uint64_t *)epkt)[3],
+		    EPKT_DW1, DATA_TYPE_UINT64, ((uint64_t *)epkt)[4],
+		    EPKT_DW2, DATA_TYPE_UINT64, ((uint64_t *)epkt)[5],
+		    EPKT_DW3, DATA_TYPE_UINT64, ((uint64_t *)epkt)[6],
+		    EPKT_DW4, DATA_TYPE_UINT64, ((uint64_t *)epkt)[7],
 		    EPKT_RC_DESCR, DATA_TYPE_STRING, descr_buf);
 	}
 }
@@ -582,6 +599,73 @@
 
 /* ARGSUSED */
 static int
+px_port_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
+{
+	pf_pcie_adv_err_regs_t	adv_reg;
+	uint16_t		s_status;
+	int			sts = PX_PANIC;
+
+	/*
+	 * Check for failed non-posted writes, which are errors that are
+	 * not defined in the PCIe spec.  Otherwise, return panic.
+	 */
+	if (!((epkt->rc_descr.op == OP_PIO) &&
+	    (epkt->rc_descr.phase == PH_IRR))) {
+		sts = (PX_PANIC);
+		goto done;
+	}
+
+	/*
+	 * Gather the error logs.  If they do not exist, just return with no
+	 * panic and let the fabric message take care of the error.
+	 */
+	if (!epkt->rc_descr.H) {
+		sts = (PX_NO_PANIC);
+		goto done;
+	}
+
+	adv_reg.pcie_ue_hdr[0] = (uint32_t)(epkt->hdr[0]);
+	adv_reg.pcie_ue_hdr[1] = (uint32_t)(epkt->hdr[0] >> 32);
+	adv_reg.pcie_ue_hdr[2] = (uint32_t)(epkt->hdr[1]);
+	adv_reg.pcie_ue_hdr[3] = (uint32_t)(epkt->hdr[1] >> 32);
+
+	sts = pf_tlp_decode(PCIE_DIP2BUS(dip), &adv_reg);
+
+	if (epkt->rc_descr.M)
+		adv_reg.pcie_ue_tgt_addr = epkt->addr;
+
+	if (!((sts == DDI_SUCCESS) || (epkt->rc_descr.M))) {
+		/* Let the fabric message take care of error */
+		sts = PX_NO_PANIC;
+		goto done;
+	}
+
+	/* See if the failed transaction belonged to a hardened driver */
+	if (pf_hdl_lookup(dip, derr->fme_ena,
+	    adv_reg.pcie_ue_tgt_trans, adv_reg.pcie_ue_tgt_addr,
+	    adv_reg.pcie_ue_tgt_bdf) == PF_HDL_FOUND)
+		sts = (PX_NO_PANIC);
+	else
+		sts = (PX_PANIC);
+
+	/* Add pfd to cause a fabric scan */
+	switch (epkt->rc_descr.cond) {
+	case CND_RCA:
+		s_status = PCI_STAT_R_TARG_AB;
+		break;
+	case CND_RUR:
+		s_status = PCI_STAT_R_MAST_AB;
+		break;
+	}
+	px_rp_en_q(DIP_TO_STATE(dip), adv_reg.pcie_ue_tgt_bdf,
+	    adv_reg.pcie_ue_tgt_addr, s_status);
+
+done:
+	return (sts);
+}
+
+/* ARGSUSED */
+static int
 px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
 {
 	px_pec_err_t	*pec_p = (px_pec_err_t *)epkt;
--- a/usr/src/uts/sun4v/io/px/px_err.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/px/px_err.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _SYS_PX_ERR_H
 #define	_SYS_PX_ERR_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -40,6 +38,7 @@
 #define	BLOCK_MMU		0x2
 #define	BLOCK_INTR		0x3
 #define	BLOCK_PCIE		0x4
+#define	BLOCK_PORT		0x5
 #define	BLOCK_UNKNOWN		0xe
 
 /* Op definitions for HOSTBUS */
@@ -48,7 +47,6 @@
 #define	OP_DMA			0x2
 #define	OP_UNKNOWN		0xe
 
-
 /* Op definitions for MMU */
 #define	OP_RESERVED		0x0
 #define	OP_XLAT			0x1
@@ -62,6 +60,14 @@
 #define	OP_MSI64		0x2
 #define	OP_MSIQ			0x3
 #define	OP_PCIEMSG		0x4
+#define	OP_FIXED		0x5
+#define	OP_UNKNOWN		0xe
+
+/* Op definitions for PORT */
+#define	OP_RESERVED		0x0
+#define	OP_PIO			0x1
+#define	OP_DMA			0x2
+#define	OP_LINK			0x3
 #define	OP_UNKNOWN		0xe
 
 /* Phase definitons */
@@ -71,34 +77,33 @@
 #define	PH_UNKNOWN		0xe
 #define	PH_IRR			0xf
 
-/* Condition definitions for ADDR Phase */
+/* Phase definitions for PORT/Link */
+#define	PH_FC			0x1
+
+
+/* Condition definitions for any major Block/Op/Phase */
 #define	CND_RESERVED		0x0
 #define	CND_ILL			0x1
 #define	CND_UNMAP		0x2
+#define	CND_INT			0x3
+#define	CND_UE			0x4
+#define	CND_INV			0x6
 #define	CND_UNKNOWN		0xe
 #define	CND_IRR			0xf
 
-/* Condition definitions for DATA Phase */
-#define	CND_RESERVED		0x0
-#define	CND_ILL			0x1
-#define	CND_INT			0x3
-#define	CND_UE			0x4
-#define	CND_UNKNOWN		0xe
-#define	CND_IRR			0xf
-
-/* Condition definitions for MMU Block ADDR phase */
-#define	CND_PROT		0x5
-#define	CND_INV			0x6
-
-/* Condition definitions for INTR Block MSIQ  Op Data phase */
+/* Additional condition definitions for INTR Block MSIQ phase */
 #define	CND_OV			0x5
 
-/* Condition definitions for Unkown phase */
-#define	CND_RESERVED		0x0
-#define	CND_ILL			0x1
+/* Additional condition definitions for MMU|INTR Block ADDR phase */
+#define	CND_PROT		0x5
+
+/* Additional condition definitions for DATA phase */
 #define	CND_TO			0x5
-#define	CND_UNKNOWN		0xe
-#define	CND_IRR			0xf
+
+/* Additional condition definitions for Port Link phase */
+#define	CND_RCA			0x7
+#define	CND_RUR			0x8
+#define	CND_UC			0x9
 
 /* Dir definitions for HOSTBUS & MMU */
 #define	DIR_RESERVED		0x0
@@ -115,31 +120,60 @@
 #define	EPKT_SYSINO		"sysino"
 #define	EPKT_EHDL		"ehdl"
 #define	EPKT_STICK		"stick"
+#define	EPKT_DW0		"dw0"
+#define	EPKT_DW1		"dw1"
+#define	EPKT_DW2		"dw2"
+#define	EPKT_DW3		"dw3"
+#define	EPKT_DW4		"dw4"
 #define	EPKT_RC_DESCR		"rc_descr"
 #define	EPKT_PEC_DESCR		"pec_descr"
 
+#ifndef _ESC
 typedef struct root_complex {
 	uint64_t  sysino;
 	uint64_t  ehdl;
 	uint64_t  stick;
 	struct  {
+#if defined(_BIT_FIELDS_LTOH)
+		uint32_t S	: 1,	/* Also the "Q" flag */
+			M	: 1,
+			D	: 1,
+			R	: 1,
+			H	: 1,
+			C	: 1,
+			I	: 1,
+			B	: 1,
+				: 3,
+			STOP	: 1,
+			dir	: 4,
+			cond	: 4,
+			phase	: 4,
+			op	: 4,
+			block	: 4;
+#elif defined(_BIT_FIELDS_HTOL)
 		uint32_t block	: 4,
 			op	: 4,
 			phase	: 4,
 			cond	: 4,
 			dir	: 4,
 			STOP	: 1,
-				: 6,
+				: 3,
+			B	: 1,
+			I	: 1,
+			C	: 1,
 			H	: 1,
 			R	: 1,
 			D	: 1,
 			M	: 1,
-			S	: 1;
+			S	: 1;	/* Also the "Q" flag */
+#else
+#error "bit field not defined"
+#endif
 	} rc_descr;
-	uint32_t  size;
+	uint32_t  size;			/* Also the EQ Num */
 	uint64_t  addr;
 	uint64_t  hdr[2];
-	uint64_t  reserved;
+	uint64_t  reserved;		/* Contains Port */
 } px_rc_err_t;
 
 typedef struct pec_block_err {
@@ -169,6 +203,7 @@
 	uint32_t  err_src_reg;
 	uint32_t  root_err_status;
 } px_pec_err_t;
+#endif	/* _ESC */
 
 #ifdef	__cplusplus
 }
--- a/usr/src/uts/sun4v/io/px/px_err_gen.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/px/px_err_gen.c	Fri Dec 11 10:41:17 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 /*
  * The file has been code generated.  Do NOT modify this file directly.  Please
  * use the sun4v PCIe FMA code generation tool.
@@ -32,6 +30,8 @@
  * This file was generated for the following platforms:
  * - Fire
  * - N2PIU
+ * - Rainbow Falls
+ * - Victoria Falls
  */
 
 /* ARGSUSED */
@@ -68,6 +68,19 @@
 				case DIR_RDWR:
 					err = PX_PANIC;
 					break;
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				case DIR_WRITE:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			case CND_TO:
+				switch (epkt->rc_descr.dir) {
+				case DIR_READ:
+					err = PX_PANIC;
+					break;
 				case DIR_WRITE:
 					err = PX_PANIC;
 					break;
@@ -122,10 +135,10 @@
 			switch (epkt->rc_descr.cond) {
 			case CND_INT:
 				switch (epkt->rc_descr.dir) {
-				case DIR_READ:
+				case DIR_RDWR:
 					err = PX_PANIC;
 					break;
-				case DIR_RDWR:
+				case DIR_UNKNOWN:
 					err = PX_PANIC;
 					break;
 				case DIR_WRITE:
@@ -180,6 +193,13 @@
 			break;
 		case PH_DATA:
 			switch (epkt->rc_descr.cond) {
+			case CND_INT:
+				switch (epkt->rc_descr.dir) {
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
 			case CND_UE:
 				switch (epkt->rc_descr.dir) {
 				case DIR_IRR:
@@ -244,6 +264,24 @@
 		break;
 	case OP_TBW:
 		switch (epkt->rc_descr.phase) {
+		case PH_ADDR:
+			switch (epkt->rc_descr.cond) {
+			case CND_UNKNOWN:
+				switch (epkt->rc_descr.dir) {
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			case CND_UNMAP:
+				switch (epkt->rc_descr.dir) {
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
 		case PH_DATA:
 			switch (epkt->rc_descr.cond) {
 			case CND_INT:
@@ -269,6 +307,9 @@
 				case DIR_IRR:
 					err = PX_PANIC;
 					break;
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
 				} /* DIR */
 				break;
 			} /* CND */
@@ -311,6 +352,13 @@
 			break;
 		case PH_DATA:
 			switch (epkt->rc_descr.cond) {
+			case CND_INT:
+				switch (epkt->rc_descr.dir) {
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
 			case CND_INV:
 				switch (epkt->rc_descr.dir) {
 				case DIR_RDWR:
@@ -330,6 +378,9 @@
 				break;
 			case CND_PROT:
 				switch (epkt->rc_descr.dir) {
+				case DIR_RDWR:
+					err = PX_NO_PANIC;
+					break;
 				case DIR_WRITE:
 					err = PX_NO_PANIC;
 					break;
@@ -368,6 +419,21 @@
 		return (PX_PANIC);
 
 	switch (epkt->rc_descr.op) {
+	case OP_FIXED:
+		switch (epkt->rc_descr.phase) {
+		case PH_UNKNOWN:
+			switch (epkt->rc_descr.cond) {
+			case CND_ILL:
+				switch (epkt->rc_descr.dir) {
+				case DIR_INGRESS:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
+		} /* PH */
+		break;
 	case OP_MSI32:
 		switch (epkt->rc_descr.phase) {
 		case PH_DATA:
@@ -436,6 +502,17 @@
 		break;
 	case OP_MSIQ:
 		switch (epkt->rc_descr.phase) {
+		case PH_DATA:
+			switch (epkt->rc_descr.cond) {
+			case CND_INT:
+				switch (epkt->rc_descr.dir) {
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
 		case PH_UNKNOWN:
 			switch (epkt->rc_descr.cond) {
 			case CND_ILL:
@@ -467,9 +544,160 @@
 					err = PX_PANIC;
 					break;
 				} /* DIR */
+				break;
+			} /* CND */
+			break;
+		} /* PH */
+		break;
+	case OP_UNKNOWN:
+		switch (epkt->rc_descr.phase) {
+		case PH_DATA:
+			switch (epkt->rc_descr.cond) {
+			case CND_INT:
+				switch (epkt->rc_descr.dir) {
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			case CND_ILL:
+				switch (epkt->rc_descr.dir) {
+				case DIR_IRR:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
+		case PH_UNKNOWN:
+			switch (epkt->rc_descr.cond) {
+			case CND_ILL:
+				switch (epkt->rc_descr.dir) {
+				case DIR_IRR:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
 			} /* CND */
 		} /* PH */
 	} /* OP */
 
 	return (err);
 }
+
+
+/* ARGSUSED */
+static int
+px_port_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
+{
+	int err = 0;
+
+	/* STOP bit indicates a secondary error. Panic if it is set */
+	if (epkt->rc_descr.STOP == 1)
+		return (PX_PANIC);
+
+	switch (epkt->rc_descr.op) {
+	case OP_DMA:
+		switch (epkt->rc_descr.phase) {
+		case PH_DATA:
+			switch (epkt->rc_descr.cond) {
+			case CND_INT:
+				switch (epkt->rc_descr.dir) {
+				case DIR_READ:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
+		} /* PH */
+		break;
+	case OP_LINK:
+		switch (epkt->rc_descr.phase) {
+		case PH_FC:
+			switch (epkt->rc_descr.cond) {
+			case CND_TO:
+				switch (epkt->rc_descr.dir) {
+				case DIR_IRR:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
+		} /* PH */
+		break;
+	case OP_PIO:
+		switch (epkt->rc_descr.phase) {
+		case PH_DATA:
+			switch (epkt->rc_descr.cond) {
+			case CND_INT:
+				switch (epkt->rc_descr.dir) {
+				case DIR_READ:
+					err = PX_PANIC;
+					break;
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
+		case PH_IRR:
+			switch (epkt->rc_descr.cond) {
+			case CND_INV:
+				switch (epkt->rc_descr.dir) {
+				case DIR_RDWR:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			case CND_RCA:
+				switch (epkt->rc_descr.dir) {
+				case DIR_WRITE:
+					err = px_port_handle_errors(dip, derr,
+					    epkt);
+					break;
+				} /* DIR */
+				break;
+			case CND_RUR:
+				switch (epkt->rc_descr.dir) {
+				case DIR_WRITE:
+					err = px_port_handle_errors(dip, derr,
+					    epkt);
+					break;
+				} /* DIR */
+				break;
+			case CND_TO:
+				switch (epkt->rc_descr.dir) {
+				case DIR_WRITE:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+				break;
+			case CND_UC:
+				switch (epkt->rc_descr.dir) {
+				case DIR_IRR:
+					err = PX_NO_PANIC;
+					break;
+				} /* DIR */
+				break;
+			} /* CND */
+			break;
+		} /* PH */
+		break;
+	case OP_UNKNOWN:
+		switch (epkt->rc_descr.phase) {
+		case PH_DATA:
+			switch (epkt->rc_descr.cond) {
+			case CND_INT:
+				switch (epkt->rc_descr.dir) {
+				case DIR_UNKNOWN:
+					err = PX_PANIC;
+					break;
+				} /* DIR */
+			} /* CND */
+		} /* PH */
+	} /* OP */
+
+	return (err);
+}
--- a/usr/src/uts/sun4v/io/vnet_dds.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/io/vnet_dds.c	Fri Dec 11 10:41:17 2009 -0800
@@ -154,13 +154,24 @@
 #endif
 
 /*
- * Hypervisor N2/NIU services information.
+ * Hypervisor N2/NIU services information:
+ *
+ * The list of HV versions that support NIU HybridIO. Note,
+ * the order is higher version to a lower version, as the
+ * registration is attempted in this order.
  */
-static hsvc_info_t niu_hsvc = {
-	HSVC_REV_1, NULL, HSVC_GROUP_NIU, 1, 1, "vnet_dds"
+static hsvc_info_t niu_hsvc[] = {
+	{HSVC_REV_1, NULL, HSVC_GROUP_NIU, 2, 0, "vnet_dds"},
+	{HSVC_REV_1, NULL, HSVC_GROUP_NIU, 1, 1, "vnet_dds"}
 };
 
 /*
+ * Index that points to the successful HV version that
+ * is registered.
+ */
+static int niu_hsvc_index = -1;
+
+/*
  * Lock to serialize the NIU device node related operations.
  */
 kmutex_t vdds_dev_lock;
@@ -173,18 +184,29 @@
 void
 vdds_mod_init(void)
 {
+	int i;
 	int rv;
-	uint64_t minor;
+	uint64_t minor = 0;
 
-	rv = hsvc_register(&niu_hsvc, &minor);
 	/*
-	 * Only HV version 1.1 is capable of NIU Hybrid IO.
+	 * Try to register each entry in niu_hsvc, in order.
 	 */
-	if ((rv == 0) && (minor == 1)) {
-		vdds_hv_hio_capable = B_TRUE;
+	for (i = 0; i < (sizeof (niu_hsvc) / sizeof (hsvc_info_t)); i++) {
+		rv = hsvc_register(&niu_hsvc[i], &minor);
+		if (rv == 0) {
+			if (minor == niu_hsvc[i].hsvc_minor) {
+				vdds_hv_hio_capable = B_TRUE;
+				niu_hsvc_index = i;
+				break;
+			} else {
+				(void) hsvc_unregister(&niu_hsvc[i]);
+			}
+		}
 	}
 	mutex_init(&vdds_dev_lock, NULL, MUTEX_DRIVER, NULL);
-	DBG1(NULL, "HV HIO capable");
+	DBG2(NULL, "HV HIO capable=%d ver(%ld.%ld)", vdds_hv_hio_capable,
+	    (niu_hsvc_index == -1) ? 0 : niu_hsvc[niu_hsvc_index].hsvc_major,
+	    minor);
 }
 
 /*
@@ -193,7 +215,9 @@
 void
 vdds_mod_fini(void)
 {
-	(void) hsvc_unregister(&niu_hsvc);
+	if (niu_hsvc_index != -1) {
+		(void) hsvc_unregister(&niu_hsvc[niu_hsvc_index]);
+	}
 	mutex_destroy(&vdds_dev_lock);
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/iospc/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,99 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+#
+# This makefile drives the production of the sun4v IO Performance Counter Driver
+#
+# sun4v implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE	= ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE		= iospc
+OBJECTS		= $(IOSPC_OBJS:%=$(OBJS_DIR)/%)
+LINTS		= $(IOSPC_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_PSM_DRV_DIR)/$(MODULE)
+CONF_SRCDIR	= $(UTSBASE)/sun4v/io/iospc
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4v/Makefile.sun4v
+
+#
+# Define targets
+#
+ALL_TARGET	= $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE)  $(ROOT_CONFFILE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS		+= $(CCVERBOSE)
+
+CFLAGS += -dalign -DMODULE_NAME=\"$(MODULE)\"
+
+LINTFLAGS += -DMODULE_NAME=\"$(MODULE)\"
+
+#
+# Module Dependencies
+#
+LDFLAGS		+= -dy -Nmisc/ds
+
+INC_PATH        += -I$(UTSBASE)/sun4v/io/iospc
+
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/$(PLATFORM)/Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/kt/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,117 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# uts/sun4v/kt/Makefile
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+#	This makefile drives the production of the UltraSPARC-KT cpu module.
+#
+#	sun4v implementation architecture dependent
+#
+
+#
+#	Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE	= ../..
+
+#
+#	Define the module and object file sets.
+#
+MODULE		= SUNW,UltraSPARC-KT
+OBJECTS		= $(NIAGARA2CPU_OBJS:%=$(OBJS_DIR)/%)
+LINTS		= $(NIAGARA2CPU_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_PSM_CPU_DIR)/$(MODULE)
+
+CPU_DIR		= .
+HERE		= ../kt
+
+#
+#	Include common rules.
+#
+include $(UTSBASE)/sun4v/Makefile.sun4v
+
+#
+#	Override defaults
+#
+CLEANFILES	+= $(CPULIB) $(SYM_MOD)
+
+#
+#	Define targets
+#
+ALL_TARGET	= $(SYM_MOD)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= def $(BINARY) $(ROOTMODULE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE) -DKT_IMPL
+
+#
+# The ATOMIC_BO_ENABLE_SHIFT enables backoff in atomic routines.
+# It is also used to scale final limit value w.r.t. number of
+# online cpus.
+#
+ATOMIC_BO_FLAG = -DATOMIC_BO_ENABLE_SHIFT=4
+CFLAGS += $(ATOMIC_BO_FLAG)
+CPPFLAGS +=$(ATOMIC_BO_FLAG)
+AS_CPPFLAGS += $(ATOMIC_BO_FLAG)
+
+#
+# cpu-module-specific flags
+#
+CPPFLAGS += -DCPU_MODULE -DKT_IMPL
+CPPFLAGS += -DSUN4V_CONTIG_MEM_PREALLOC_SIZE_MB=68
+AS_CPPFLAGS += -DCPU_MODULE -DKT_IMPL
+
+#
+#	Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+$(CPULIB):	$(BINARY)
+	$(BUILD.SO) $(BINARY)
+
+$(SYM_MOD):	$(UNIX_O) $(CPULIB)
+	@echo "resolving symbols against unix.o"
+	@(cd $(UNIX_DIR); pwd; \
+	    CPU_DIR=$(HERE) SYM_MOD=$(HERE)/$(SYM_MOD) $(MAKE) symcheck)
+
+#	Include common targets.
+#
+include $(UTSBASE)/$(PLATFORM)/Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/sun4v/kt_pcbe/Makefile	Fri Dec 11 10:41:17 2009 -0800
@@ -0,0 +1,86 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# This Makefile builds the UltraSPARC-KT Performance Counter BackEnd (PCBE).
+#
+
+UTSBASE = ../..
+
+#
+#	Define module and object file sets.
+#
+MODULE		= pcbe.SUNW,UltraSPARC-KT
+OBJECTS		= $(N2_PCBE_OBJS:%=$(OBJS_DIR)/%)
+LINTS		= $(N2_PCBE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_PSM_PCBE_DIR)/$(MODULE)
+
+#
+#	Include common rules.
+#
+include $(UTSBASE)/sun4v/Makefile.sun4v
+
+#
+#	Define targets.
+#
+ALL_TARGET	= $(BINARY)
+LINT_MODULE	= kt_pcbe
+LINT_TARGET	= $(LINT_MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += -DKT_IMPL
+
+#
+# KT-specific flags
+#
+CPPFLAGS += -DKT_IMPL
+AS_CPPFLAGS += -DKT_IMPL
+
+#
+#	Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+#
+#	Include common targets.
+#
+include $(UTSBASE)/sun4v/Makefile.targ
--- a/usr/src/uts/sun4v/ml/hcall.s	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/ml/hcall.s	Fri Dec 11 10:41:17 2009 -0800
@@ -347,9 +347,7 @@
 /*ARGSUSED*/	
 uint64_t
 hv_soft_state_get(uint64_t string, uint64_t *state)
-{ return (0); }
-
-uint64_t
+{ return (0); }uint64_t
 hv_guest_suspend(void)
 { return (0); }
 
@@ -363,6 +361,11 @@
 hv_set_stick_npt(uint64_t npt)
 { return (0); }
 
+/*ARGSUSED*/	
+uint64_t
+hv_reboot_data_set(uint64_t buffer_ra, uint64_t buffer_len)
+{ return (0); }
+
 #else	/* lint || __lint */
 
 	/*
@@ -1273,4 +1276,17 @@
 	nop
 	SET_SIZE(hv_stick_set_npt)
 
+	/*
+	 * REBOOT_DATA_SET
+	 * arg0 buffer real address
+	 * arg1 buffer length
+	 * ret0 status
+	 */
+	ENTRY(hv_reboot_data_set)
+	mov	HV_REBOOT_DATA_SET, %o5
+	ta	FAST_TRAP
+	retl
+	nop
+	SET_SIZE(hv_reboot_data_set)
+
 #endif	/* lint || __lint */
--- a/usr/src/uts/sun4v/os/error.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/os/error.c	Fri Dec 11 10:41:17 2009 -0800
@@ -100,6 +100,8 @@
 static void ce_drain(void *, struct async_flt *, errorq_elem_t *);
 static void errh_handle_attr(errh_async_flt_t *);
 static void errh_handle_asr(errh_async_flt_t *);
+static void errh_handle_sp(errh_async_flt_t *);
+static void sp_ereport_post(uint8_t);
 
 /*ARGSUSED*/
 void
@@ -153,6 +155,13 @@
 			}
 			continue;
 
+		case ERRH_DESC_SP:
+			/*
+			 * The state of the SP has changed.
+			 */
+			errh_handle_sp(&errh_flt);
+			continue;
+
 		default:
 			cmn_err(CE_WARN, "Error Descriptor 0x%llx "
 			    " invalid in resumable error handler",
@@ -868,6 +877,27 @@
 }
 
 /*
+ * Handle a SP state change.
+ */
+static void
+errh_handle_sp(errh_async_flt_t *errh_fltp)
+{
+	uint8_t		sp_state;
+
+	sp_state = (errh_fltp->errh_er.attr & ERRH_SP_MASK) >> ERRH_SP_SHIFT;
+
+	/*
+	 * Only the SP-unavailable state change is currently valid.
+	 */
+	if (sp_state == ERRH_SP_UNAVAILABLE) {
+		sp_ereport_post(sp_state);
+	} else {
+		cmn_err(CE_WARN, "Invalid SP state 0x%x in SP state change "
+		    "handler.\n", sp_state);
+	}
+}
+
+/*
  * Dump the error packet
  */
 /*ARGSUSED*/
@@ -907,3 +937,32 @@
 	}
 	mutex_exit(&errh_print_lock);
 }
+
+static void
+sp_ereport_post(uint8_t sp_state)
+{
+	nvlist_t	*ereport, *detector;
+
+	/*
+	 * Currently an ereport is only sent when the state of the SP
+	 * changes to unavailable.
+	 */
+	ASSERT(sp_state == ERRH_SP_UNAVAILABLE);
+
+	ereport = fm_nvlist_create(NULL);
+	detector = fm_nvlist_create(NULL);
+
+	/*
+	 * Create an HC-scheme detector FMRI.
+	 */
+	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
+	    "chassis", 0);
+
+	fm_ereport_set(ereport, FM_EREPORT_VERSION, "chassis.sp.unavailable",
+	    fm_ena_generate(0, FM_ENA_FMT1), detector, NULL);
+
+	(void) fm_ereport_post(ereport, EVCH_TRYHARD);
+
+	fm_nvlist_destroy(ereport, FM_NVA_FREE);
+	fm_nvlist_destroy(detector, FM_NVA_FREE);
+}
--- a/usr/src/uts/sun4v/os/hsvc.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/os/hsvc.c	Fri Dec 11 10:41:17 2009 -0800
@@ -666,6 +666,7 @@
 	{{HSVC_REV_1, NULL,	HSVC_GROUP_CORE,	1,	2, NULL}, 1},
 	{{HSVC_REV_1, NULL,	HSVC_GROUP_DIAG,	1,	0, NULL}, 1},
 	{{HSVC_REV_1, NULL,	HSVC_GROUP_INTR,	1,	0, NULL}, 0},
+	{{HSVC_REV_1, NULL,	HSVC_GROUP_REBOOT_DATA,	1,	0, NULL}, 0},
 };
 
 #define	HSVCINFO_UNIX_CNT	(sizeof (hsvcinfo_unix) / sizeof (hsvc_info_t))
--- a/usr/src/uts/sun4v/os/mach_cpu_states.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/os/mach_cpu_states.c	Fri Dec 11 10:41:17 2009 -0800
@@ -109,7 +109,7 @@
  * the next boot.
  */
 static void
-store_boot_cmd(char *args, boolean_t add_boot_str)
+store_boot_cmd(char *args, boolean_t add_boot_str, boolean_t invoke_cb)
 {
 	static char	cmd_buf[BOOT_CMD_MAX_LEN];
 	size_t		len = 1;
@@ -117,6 +117,22 @@
 	size_t		base_len = 0;
 	size_t		args_len;
 	size_t		args_max;
+	uint64_t	majornum;
+	uint64_t	minornum;
+	uint64_t	buf_pa;
+	uint64_t	status;
+
+	status = hsvc_version(HSVC_GROUP_REBOOT_DATA, &majornum, &minornum);
+
+	/*
+	 * invoke_cb is set to true when we are in a normal shutdown sequence
+	 * (interrupts are not blocked, the system is not panicking or being
+	 * suspended). In that case, we can use any method to store the boot
+	 * command. Otherwise storing the boot command can not be done using
+	 * a domain service because it can not be safely used in that context.
+	 */
+	if ((status != H_EOK) && (invoke_cb == B_FALSE))
+		return;
 
 	if (add_boot_str) {
 		(void) strcpy(cmd_buf, BOOT_CMD_BASE);
@@ -140,11 +156,25 @@
 		(void) strncpy(&cmd_buf[base_len], args, args_len);
 	}
 
-	node = prom_optionsnode();
-	if ((node == OBP_NONODE) || (node == OBP_BADNODE) ||
-	    prom_setprop(node, "reboot-command", cmd_buf, len) == -1)
-		cmn_err(CE_WARN, "Unable to store boot command for "
-		    "use on reboot");
+	/*
+	 * Save the reboot-command with HV, if reboot data group is
+	 * negotiated. Else save the reboot-command via vars-config domain
+	 * services on the SP.
+	 */
+	if (status == H_EOK) {
+		buf_pa = va_to_pa(cmd_buf);
+		status = hv_reboot_data_set(buf_pa, len);
+		if (status != H_EOK) {
+			cmn_err(CE_WARN, "Unable to store boot command for "
+			    "use on reboot with HV: error = 0x%lx", status);
+		}
+	} else {
+		node = prom_optionsnode();
+		if ((node == OBP_NONODE) || (node == OBP_BADNODE) ||
+		    prom_setprop(node, "reboot-command", cmd_buf, len) == -1)
+			cmn_err(CE_WARN, "Unable to store boot command for "
+			    "use on reboot");
+	}
 }
 
 
@@ -182,8 +212,8 @@
 		 * it completes the reset.  This causes the system
 		 * to stop at the ok prompt.
 		 */
-		if (domaining_enabled() && invoke_cb)
-			store_boot_cmd("noop", B_FALSE);
+		if (domaining_enabled())
+			store_boot_cmd("noop", B_FALSE, invoke_cb);
 		break;
 
 	case AD_POWEROFF:
@@ -221,8 +251,8 @@
 		 * before we enter restricted mode.  This is possible
 		 * only if we are not being called from panic.
 		 */
-		if (domaining_enabled() && invoke_cb)
-			store_boot_cmd(bootstr, B_TRUE);
+		if (domaining_enabled())
+			store_boot_cmd(bootstr, B_TRUE, invoke_cb);
 	}
 
 	/*
--- a/usr/src/uts/sun4v/pcbe/niagara2_pcbe.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/pcbe/niagara2_pcbe.c	Fri Dec 11 10:41:17 2009 -0800
@@ -140,12 +140,12 @@
 	char *event;
 } ni2_generic_event_t;
 
-#define	ULTRA_PCR_PRIVPIC	(UINT64_C(1) << CPC_NIAGARA2_PCR_PRIV_SHIFT)
+#define	ULTRA_PCR_PRIVPIC	(UINT64_C(1) << CPC_PCR_PRIV_SHIFT)
 #define	EV_END {NULL, 0, 0}
 #define	GEN_EV_END {NULL, NULL}
 
 static const uint64_t	allstopped = (ULTRA_PCR_PRIVPIC |
-	CPC_NIAGARA2_PCR_HOLDOV0 | CPC_NIAGARA2_PCR_HOLDOV1);
+	CPC_PCR_HOLDOV0 | CPC_PCR_HOLDOV1);
 
 /*
  * We update this array in the program and allstop routine. The array
@@ -179,6 +179,20 @@
 	{ "CPU_ifetch_to_PCX",			0x508, 0x3f },
 	{ "CPU_st_to_PCX",			0x510, 0x3f },
 	{ "MMU_ld_to_PCX",			0x520, 0x3f },
+#ifdef KT_IMPL
+	{ "DES_3DES_op",			0x601, 0xff },
+	{ "AES_op",				0x602, 0xff },
+	{ "Kasumi_op",				0x604, 0xff },
+	{ "MD5_SHA-1_SHA-256_op",		0x608, 0xff },
+	{ "MA_op",				0x610, 0xff },
+	{ "CRC_TCPIP_cksum",			0x620, 0xff },
+	{ "DES_3DES_busy_cycle",		0x701, 0xff },
+	{ "AES_busy_cycle",			0x702, 0xff },
+	{ "Kasumi_busy_cycle",			0x704, 0xff },
+	{ "MD5_SHA-1_SHA-256_busy_cycle",	0x708, 0xff },
+	{ "MA_busy_cycle",			0x710, 0xff },
+	{ "CRC_MPA_cksum",			0x720, 0xff },
+#else
 	{ "DES_3DES_op",			0x601, 0x3f },
 	{ "AES_op",				0x602, 0x3f },
 	{ "RC4_op",				0x604, 0x3f },
@@ -191,6 +205,7 @@
 	{ "MD5_SHA-1_SHA-256_busy_cycle",	0x708, 0x3f },
 	{ "MA_busy_cycle",			0x710, 0x3f },
 	{ "CRC_MPA_cksum",			0x720, 0x3f },
+#endif
 	{ "ITLB_miss",				0xb04, 0x0c },
 	{ "DTLB_miss",				0xb08, 0x0c },
 	{ "TLB_miss",				0xb0c, 0x0c },
@@ -231,6 +246,10 @@
 static const char	*cpu_impl_name = "UltraSPARC T2+";
 static const char *cpu_pcbe_ref = "See the \"UltraSPARC T2+ User's Manual\" "
 			"for descriptions of these events." CPU_REF_URL;
+#elif defined(KT_IMPL)
+static const char	*cpu_impl_name = "UltraSPARC KT";
+static const char *cpu_pcbe_ref = "See the \"UltraSPARC KT User's Manual\" "
+			"for descriptions of these events." CPU_REF_URL;
 #endif
 
 static boolean_t cpu_hsvc_available = B_TRUE;
@@ -249,10 +268,13 @@
 #elif defined(VFALLS_IMPL)
 	uint64_t		hsvc_cpu_group = HSVC_GROUP_VFALLS_CPU;
 	uint64_t		hsvc_cpu_major = VFALLS_HSVC_MAJOR;
+#elif defined(KT_IMPL)
+	uint64_t	hsvc_cpu_group = HSVC_GROUP_KT_CPU;
+	uint64_t	hsvc_cpu_major = KT_HSVC_MAJOR;
 #endif
 
-	pcr_pic0_mask = CPC_NIAGARA2_PCR_PIC0_MASK;
-	pcr_pic1_mask = CPC_NIAGARA2_PCR_PIC1_MASK;
+	pcr_pic0_mask = CPC_PCR_PIC0_MASK;
+	pcr_pic1_mask = CPC_PCR_PIC1_MASK;
 
 	/*
 	 * Validate API version for Niagara2 specific hypervisor services
@@ -336,9 +358,15 @@
 		return ("hpriv,emask");
 #elif defined(VFALLS_IMPL)
 		return ("hpriv,l2ctl,emask");
+#elif defined(KT_IMPL)
+		return ("hpriv,l2ctl,emask,sample");
 #endif
 	else
+#if defined(KT_IMPL)
+		return ("emask,sample");
+#else
 		return ("emask");
+#endif
 }
 
 static ni2_generic_event_t *
@@ -383,34 +411,45 @@
 	uint64_t	pic;
 	uint32_t	pic0, pic1;
 	boolean_t	update_pic = B_FALSE;
+	boolean_t	pic_inrange = B_FALSE;
 
 	ASSERT(getpil() >= DISP_LEVEL);
 	pcr = ultra_getpcr();
 	DTRACE_PROBE1(niagara2__getpcr, uint64_t, pcr);
-	overflow =  (pcr & CPC_NIAGARA2_PCR_OV0_MASK) >>
-	    CPC_NIAGARA2_PCR_OV0_SHIFT;
-	overflow |=  (pcr & CPC_NIAGARA2_PCR_OV1_MASK) >>
-	    CPC_NIAGARA2_PCR_OV1_SHIFT;
+	overflow =  (pcr & CPC_PCR_OV0_MASK) >>
+	    CPC_PCR_OV0_SHIFT;
+	overflow |=  (pcr & CPC_PCR_OV1_MASK) >>
+	    CPC_PCR_OV1_SHIFT;
 
 	pic = ultra_getpic();
 	pic0 = (uint32_t)(pic & PIC0_MASK);
 	pic1 = (uint32_t)((pic >> PIC1_SHIFT) & PIC0_MASK);
 
-	pcr |= (CPC_NIAGARA2_PCR_HOLDOV0 | CPC_NIAGARA2_PCR_HOLDOV1);
+	pcr |= (CPC_PCR_HOLDOV0 | CPC_PCR_HOLDOV1);
 
 	if (overflow & 0x1) {
-		pcr &= ~(CPC_NIAGARA2_PCR_OV0_MASK |
-		    CPC_NIAGARA2_PCR_HOLDOV0);
-		if (PIC_IN_OV_RANGE(pic0)) {
+		pcr &= ~(CPC_PCR_OV0_MASK |
+		    CPC_PCR_HOLDOV0);
+		pic_inrange = PIC_IN_OV_RANGE(pic0);
+#if defined(KT_IMPL)
+		if (pcr & CPC_PCR_SAMPLE_MODE_MASK)
+			pic_inrange = SAMPLE_PIC_IN_OV_RANGE(pic0);
+#endif
+		if (pic_inrange) {
 			pic0 = 0;
 			update_pic = B_TRUE;
 		}
 	}
 
 	if (overflow & 0x2) {
-		pcr &= ~(CPC_NIAGARA2_PCR_OV1_MASK |
-		    CPC_NIAGARA2_PCR_HOLDOV1);
-		if (PIC_IN_OV_RANGE(pic1)) {
+		pcr &= ~(CPC_PCR_OV1_MASK |
+		    CPC_PCR_HOLDOV1);
+		pic_inrange = PIC_IN_OV_RANGE(pic1);
+#if defined(KT_IMPL)
+		if (pcr & CPC_PCR_SAMPLE_MODE_MASK)
+			pic_inrange = SAMPLE_PIC_IN_OV_RANGE(pic1);
+#endif
+		if (pic_inrange) {
 			pic1 = 0;
 			update_pic = B_TRUE;
 		}
@@ -440,7 +479,7 @@
 	ni2_generic_event_t	*gevp;
 	int			i;
 	uint32_t		evsel;
-#if defined(VFALLS_IMPL)
+#if defined(VFALLS_IMPL) || defined(KT_IMPL)
 	uint64_t		l2ctl = 0;
 #endif
 
@@ -481,25 +520,30 @@
 			    evp->emask_valid)
 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
 			evsel |= attrs[i].ka_val;
-#if defined(VFALLS_IMPL)
+#if defined(VFALLS_IMPL) || defined(KT_IMPL)
 		} else if (strcmp(attrs[i].ka_name, "l2ctl") == 0) {
-			if ((attrs[i].ka_val | VFALLS_L2_CTL_MASK) !=
-			    VFALLS_L2_CTL_MASK)
+			if ((attrs[i].ka_val | L2_CTL_MASK) !=
+			    L2_CTL_MASK)
 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
 			else
 				l2ctl = attrs[i].ka_val;
 #endif
+#if defined(KT_IMPL)
+		} else if (strcmp(attrs[i].ka_name, "sample") == 0) {
+			if (attrs[i].ka_val != 0)
+				flags |= CPC_COUNT_SAMPLE_MODE;
+#endif
 		} else
 			return (CPC_INVALID_ATTRIBUTE);
 	}
 
-#if defined(VFALLS_IMPL)
+#if defined(VFALLS_IMPL) || defined(KT_IMPL)
 	/*
 	 * Set PERF_CONTROL bits in L2_CONTROL_REG only when events have
 	 * SL bits equal to 3.
 	 */
-	if ((evsel & VFALLS_SL3_MASK) == VFALLS_SL3_MASK) {
-		if ((hv_niagara_setperf(HV_NIAGARA_L2_CTL, l2ctl)) != 0)
+	if ((evsel & SL3_MASK) == SL3_MASK) {
+		if ((hv_niagara_setperf(HV_L2_CTL, l2ctl)) != 0)
 			return (CPC_HV_NO_ACCESS);
 	}
 #endif
@@ -529,15 +573,15 @@
 		kpreempt_disable();
 
 		DTRACE_PROBE1(niagara2__setpcr, uint64_t,
-		    allstopped | CPC_NIAGARA2_PCR_HT);
-		if (hv_niagara_setperf(HV_NIAGARA_SPARC_CTL,
-		    allstopped | CPC_NIAGARA2_PCR_HT) != H_EOK) {
+		    allstopped | CPC_PCR_HT);
+		if (hv_niagara_setperf(HV_SPARC_CTL,
+		    allstopped | CPC_PCR_HT) != H_EOK) {
 			kpreempt_enable();
 			return (CPC_HV_NO_ACCESS);
 		}
 
 		DTRACE_PROBE1(niagara2__setpcr, uint64_t, allstopped);
-		(void) hv_niagara_setperf(HV_NIAGARA_SPARC_CTL, allstopped);
+		(void) hv_niagara_setperf(HV_SPARC_CTL, allstopped);
 
 		kpreempt_enable();
 	}
@@ -565,7 +609,7 @@
 	uint64_t		toe;
 
 	/* enable trap-on-event for pic0 and pic1 */
-	toe = (CPC_NIAGARA2_PCR_TOE0 | CPC_NIAGARA2_PCR_TOE1);
+	toe = (CPC_PCR_TOE0 | CPC_PCR_TOE1);
 
 	if ((pic0 = (ni2_pcbe_config_t *)kcpc_next_config(token, NULL, NULL)) ==
 	    NULL)
@@ -574,7 +618,7 @@
 	if ((pic1 = kcpc_next_config(token, pic0, NULL)) == NULL) {
 		pic1 = &nullcfg;
 		nullcfg.pcbe_flags = pic0->pcbe_flags;
-		toe = CPC_NIAGARA2_PCR_TOE0; /* enable trap-on-event for pic0 */
+		toe = CPC_PCR_TOE0; /* enable trap-on-event for pic0 */
 	}
 
 	if (pic0->pcbe_picno != 0) {
@@ -586,7 +630,7 @@
 		tmp = pic0;
 		pic0 = pic1;
 		pic1 = tmp;
-		toe = CPC_NIAGARA2_PCR_TOE1; /* enable trap-on-event for pic1 */
+		toe = CPC_PCR_TOE1; /* enable trap-on-event for pic1 */
 	}
 
 	if (pic0->pcbe_picno != 0 || pic1->pcbe_picno != 1)
@@ -605,16 +649,20 @@
 	ultra_setpic(((uint64_t)pic1->pcbe_pic << PIC1_SHIFT) |
 	    (uint64_t)pic0->pcbe_pic);
 
-	pcr = (pic0->pcbe_evsel & pcr_pic0_mask) << CPC_NIAGARA2_PCR_PIC0_SHIFT;
+	pcr = (pic0->pcbe_evsel & pcr_pic0_mask) << CPC_PCR_PIC0_SHIFT;
 	pcr |= (pic1->pcbe_evsel & pcr_pic1_mask) <<
-	    CPC_NIAGARA2_PCR_PIC1_SHIFT;
+	    CPC_PCR_PIC1_SHIFT;
 
 	if (pic0->pcbe_flags & CPC_COUNT_USER)
-		pcr |= (1ull << CPC_NIAGARA2_PCR_UT_SHIFT);
+		pcr |= (1ull << CPC_PCR_UT_SHIFT);
 	if (pic0->pcbe_flags & CPC_COUNT_SYSTEM)
-		pcr |= (1ull << CPC_NIAGARA2_PCR_ST_SHIFT);
+		pcr |= (1ull << CPC_PCR_ST_SHIFT);
 	if (pic0->pcbe_flags & CPC_COUNT_HV)
-		pcr |= (1ull << CPC_NIAGARA2_PCR_HT_SHIFT);
+		pcr |= (1ull << CPC_PCR_HT_SHIFT);
+#if defined(KT_IMPL)
+	if (pic0->pcbe_flags & CPC_COUNT_SAMPLE_MODE)
+		pcr |= (1ull << CPC_PCR_SAMPLE_MODE_SHIFT);
+#endif
 	pcr |= toe;
 
 	DTRACE_PROBE1(niagara2__setpcr, uint64_t, pcr);
@@ -628,7 +676,7 @@
 		 * fails, assume we no longer have access to
 		 * hpriv events.
 		 */
-		if (hv_niagara_setperf(HV_NIAGARA_SPARC_CTL, pcr) != H_EOK) {
+		if (hv_niagara_setperf(HV_SPARC_CTL, pcr) != H_EOK) {
 			kcpc_invalidate_config(token);
 			return;
 		}
@@ -661,7 +709,7 @@
 	 * back on ultra_setpcr which does not have write access to the
 	 * ht bit.
 	 */
-	if (hv_niagara_setperf(HV_NIAGARA_SPARC_CTL, allstopped) != H_EOK)
+	if (hv_niagara_setperf(HV_SPARC_CTL, allstopped) != H_EOK)
 		ultra_setpcr(allstopped);
 
 	ni2_cpc_counting[CPU->cpu_id] = B_FALSE;
@@ -717,7 +765,7 @@
 		pcr = ultra_getpcr();
 		DTRACE_PROBE1(niagara2__getpcr, uint64_t, pcr);
 		if (ni2_cpc_counting[CPU->cpu_id] &&
-		    !(pcr & CPC_NIAGARA2_PCR_HT)) {
+		    !(pcr & CPC_PCR_HT)) {
 			kcpc_invalidate_config(token);
 			return;
 		}
@@ -750,6 +798,8 @@
 	"UltraSPARC T2 Performance Counters",
 #elif defined(VFALLS_IMPL)
 	"UltraSPARC T2+ Performance Counters",
+#elif defined(KT_IMPL)
+	"UltraSPARC KT Performance Counters",
 #endif
 	&ni2_pcbe_ops
 };
--- a/usr/src/uts/sun4v/pcbe/niagara_pcbe.c	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/pcbe/niagara_pcbe.c	Fri Dec 11 10:41:17 2009 -0800
@@ -133,7 +133,7 @@
 	char *event;
 } ni_generic_event_t;
 
-#define	ULTRA_PCR_PRIVPIC	(UINT64_C(1) << CPC_NIAGARA_PCR_PRIVPIC)
+#define	ULTRA_PCR_PRIVPIC	(UINT64_C(1) << CPC_PCR_PRIVPIC)
 #define	NT_END 0xFF
 #define	GEN_EVT_END { NULL, NULL }
 
@@ -206,8 +206,8 @@
 
 	events = Niagara_names;
 	generic_events = Niagara_generic_names;
-	pcr_pic0_mask = CPC_NIAGARA_PCR_PIC0_MASK;
-	pcr_pic1_mask = CPC_NIAGARA_PCR_PIC1_MASK;
+	pcr_pic0_mask = CPC_PCR_PIC0_MASK;
+	pcr_pic1_mask = CPC_PCR_PIC1_MASK;
 
 	/*
 	 * Initialize the list of events for each PIC.
@@ -324,15 +324,15 @@
 
 	pcr = ultra_getpcr();
 	DTRACE_PROBE1(niagara__getpcr, uint64_t, pcr);
-	overflow =  (pcr & CPC_NIAGARA_PCR_OVF_MASK) >>
-	    CPC_NIAGARA_PCR_OVF_SHIFT;
+	overflow =  (pcr & CPC_PCR_OVF_MASK) >>
+	    CPC_PCR_OVF_SHIFT;
 #if 0
 	/*
 	 * Not needed if the CPC framework is responsible to stop counters
 	 * and that action ends up clearing overflow flags.
 	 */
 	if (overflow)
-		ultra_setpcr(pcr & ~CPC_NIAGARA_PCR_OVF_MASK);
+		ultra_setpcr(pcr & ~CPC_PCR_OVF_MASK);
 #endif
 	return (overflow);
 }
@@ -439,13 +439,13 @@
 	ultra_setpic(((uint64_t)pic1->pcbe_pic << PIC1_SHIFT) |
 	    (uint64_t)pic0->pcbe_pic);
 
-	pcr = (pic0->pcbe_bits & pcr_pic0_mask) << CPC_NIAGARA_PCR_PIC0_SHIFT;
-	pcr |= (pic1->pcbe_bits & pcr_pic1_mask) << CPC_NIAGARA_PCR_PIC1_SHIFT;
+	pcr = (pic0->pcbe_bits & pcr_pic0_mask) << CPC_PCR_PIC0_SHIFT;
+	pcr |= (pic1->pcbe_bits & pcr_pic1_mask) << CPC_PCR_PIC1_SHIFT;
 
 	if (pic0->pcbe_flags & CPC_COUNT_USER)
-		pcr |= (1ull << CPC_NIAGARA_PCR_USR);
+		pcr |= (1ull << CPC_PCR_USR);
 	if (pic0->pcbe_flags & CPC_COUNT_SYSTEM)
-		pcr |= (1ull << CPC_NIAGARA_PCR_SYS);
+		pcr |= (1ull << CPC_PCR_SYS);
 
 	DTRACE_PROBE1(niagara__setpcr, uint64_t, pcr);
 	ultra_setpcr(pcr);
--- a/usr/src/uts/sun4v/sys/error.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/error.h	Fri Dec 11 10:41:17 2009 -0800
@@ -53,6 +53,7 @@
 #define	ERRH_DESC_DEF_NRE	3	/* Deferred non-resumalbe error */
 #define	ERRH_DESC_WARN_RE	4	/* Power-off for vBSC HostShutdown() */
 #define	ERRH_DESC_USER_DCORE	5	/* User initiated panic */
+#define	ERRH_DESC_SP		6	/* SP state change */
 
 /*
  * Sun4v Error Report Error Attributes specifies the attributes of the error
@@ -70,7 +71,7 @@
 #define	ERRH_ATTR_RQF		0x80000000	/* Resumablee Queue Full */
 
 /*
- * For Excution mode
+ * For Execution mode
  */
 #define	ERRH_MODE_MASK		0x03000000
 #define	ERRH_MODE_SHIFT		24
@@ -79,6 +80,14 @@
 #define	ERRH_MODE_PRIV		2
 
 /*
+ * For SP (Service Processor) state change
+ */
+#define	ERRH_SP_MASK		0x00000200
+#define	ERRH_SP_SHIFT		9
+#define	ERRH_SP_UNAVAILABLE	0
+#define	ERRH_SP_AVAILABLE	1
+
+/*
  * ASR register number
  */
 #define	ASR_REG_VALID		0x8000	/* Valid bit for register field */
--- a/usr/src/uts/sun4v/sys/hsvc.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/hsvc.h	Fri Dec 11 10:41:17 2009 -0800
@@ -48,12 +48,15 @@
 #define	HSVC_GROUP_VSC			0x0102
 #define	HSVC_GROUP_NCS			0x0103
 #define	HSVC_GROUP_RNG			0x0104
+#define	HSVC_GROUP_PBOOT		0x0105
 #define	HSVC_GROUP_TPM			0x0107
+#define	HSVC_GROUP_REBOOT_DATA		0x0110
 #define	HSVC_GROUP_NIAGARA_CPU		0x0200
 #define	HSVC_GROUP_FIRE_PERF		0x0201
 #define	HSVC_GROUP_NIAGARA2_CPU		0x0202
 #define	HSVC_GROUP_NIU			0x0204
 #define	HSVC_GROUP_VFALLS_CPU		0x0205
+#define	HSVC_GROUP_KT_CPU		0x0209
 #define	HSVC_GROUP_DIAG			0x0300
 
 #ifndef _ASM
--- a/usr/src/uts/sun4v/sys/hypervisor_api.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/hypervisor_api.h	Fri Dec 11 10:41:17 2009 -0800
@@ -167,6 +167,9 @@
 #define	MMU_STAT_AREA		0xfc
 #endif /* SET_MMU_STATS */
 
+#define	HV_MACH_PRI		0x170
+#define	HV_REBOOT_DATA_SET	0x172
+
 #define	HV_TPM_GET		0x176
 #define	HV_TPM_PUT		0x177
 
@@ -445,6 +448,8 @@
     uint32_t *cpuid);
 extern uint64_t hvldc_intr_settarget(uint64_t dev_hdl, uint32_t devino,
     uint32_t cpuid);
+extern uint64_t hv_mach_pri(uint64_t buffer_ra, uint64_t *buffer_sizep);
+extern uint64_t hv_reboot_data_set(uint64_t buffer_ra, uint64_t buffer_len);
 
 extern uint64_t	hv_guest_suspend(void);
 extern uint64_t	hv_tick_set_npt(uint64_t npt);
--- a/usr/src/uts/sun4v/sys/machparam.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/machparam.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _SYS_MACHPARAM_H
 #define	_SYS_MACHPARAM_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -60,7 +58,7 @@
  * makefile.
  */
 #ifndef NCPU
-#define	NCPU	256
+#define	NCPU	512
 #endif
 
 /*
--- a/usr/src/uts/sun4v/sys/n2rng.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/n2rng.h	Fri Dec 11 10:41:17 2009 -0800
@@ -62,6 +62,7 @@
 
 #define	N2RNG_BINDNAME_N2	"SUNW,n2-rng"
 #define	N2RNG_BINDNAME_VF	"SUNW,vf-rng"
+#define	N2RNG_BINDNAME_KT	"SUNW,kt-rng"
 
 #define	N2RNG_MAX_RNGS		4
 #define	N2RNG_INVALID_ID	(-1)
@@ -71,7 +72,8 @@
 typedef enum {
 	N2RNG_CPU_UNKNOWN,
 	N2RNG_CPU_N2,
-	N2RNG_CPU_VF
+	N2RNG_CPU_VF,
+	N2RNG_CPU_KT
 } n2rng_binding_t;
 
 typedef union n2rngctl {
--- a/usr/src/uts/sun4v/sys/niagara2regs.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/niagara2regs.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _SYS_NIAGARA2REGS_H
 #define	_SYS_NIAGARA2REGS_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -44,44 +42,61 @@
 #define	VFALLS_HSVC_MAJOR	1
 #define	VFALLS_HSVC_MINOR	0
 
+#define	KT_HSVC_MAJOR		1
+#define	KT_HSVC_MINOR		0
+
+#ifdef KT_IMPL
+
+/* Sample PIC overflow range is -2 to -1 */
+#define	SAMPLE_PIC_IN_OV_RANGE(x)	(((uint32_t)x >= 0xfffffffe) ? 1 : 0)
+
+#endif
+
 /* PIC overflow range is -16 to -1 */
 #define	PIC_IN_OV_RANGE(x)	(((uint32_t)x >= 0xfffffff0) ? 1 : 0)
 
 /*
- * Niagara2 SPARC Performance Instrumentation Counter
+ * SPARC Performance Instrumentation Counter
  */
 #define	PIC0_MASK	(((uint64_t)1 << 32) - 1)	/* pic0 in bits 31:0 */
 #define	PIC1_SHIFT	32				/* pic1 in bits 64:32 */
 
 /*
- * Niagara2 SPARC Performance Control Register
+ * SPARC Performance Control Register
  */
-#define	CPC_NIAGARA2_PCR_PRIV_SHIFT	0
-#define	CPC_NIAGARA2_PCR_ST_SHIFT	1
-#define	CPC_NIAGARA2_PCR_UT_SHIFT	2
+#define	CPC_PCR_PRIV_SHIFT	0
+#define	CPC_PCR_ST_SHIFT	1
+#define	CPC_PCR_UT_SHIFT	2
 
-#define	CPC_NIAGARA2_PCR_HT_SHIFT	3
-#define	CPC_NIAGARA2_PCR_HT		(1ull << CPC_NIAGARA2_PCR_HT_SHIFT)
+#define	CPC_PCR_HT_SHIFT	3
+#define	CPC_PCR_HT		(1ull << CPC_PCR_HT_SHIFT)
 
-#define	CPC_NIAGARA2_PCR_TOE0_SHIFT	4
-#define	CPC_NIAGARA2_PCR_TOE1_SHIFT	5
-#define	CPC_NIAGARA2_PCR_TOE0		(1ull << CPC_NIAGARA2_PCR_TOE0_SHIFT)
-#define	CPC_NIAGARA2_PCR_TOE1		(1ull << CPC_NIAGARA2_PCR_TOE1_SHIFT)
+#define	CPC_PCR_TOE0_SHIFT	4
+#define	CPC_PCR_TOE1_SHIFT	5
+#define	CPC_PCR_TOE0		(1ull << CPC_PCR_TOE0_SHIFT)
+#define	CPC_PCR_TOE1		(1ull << CPC_PCR_TOE1_SHIFT)
 
-#define	CPC_NIAGARA2_PCR_PIC0_SHIFT	6
-#define	CPC_NIAGARA2_PCR_PIC1_SHIFT	19
-#define	CPC_NIAGARA2_PCR_PIC0_MASK	UINT64_C(0xfff)
-#define	CPC_NIAGARA2_PCR_PIC1_MASK	UINT64_C(0xfff)
+#define	CPC_PCR_PIC0_SHIFT	6
+#define	CPC_PCR_PIC1_SHIFT	19
+#define	CPC_PCR_PIC0_MASK	UINT64_C(0xfff)
+#define	CPC_PCR_PIC1_MASK	UINT64_C(0xfff)
+
+#define	CPC_PCR_OV0_SHIFT	18
+#define	CPC_PCR_OV1_SHIFT	30
+#define	CPC_PCR_OV0_MASK	UINT64_C(0x40000)
+#define	CPC_PCR_OV1_MASK	UINT64_C(0x80000000)
 
-#define	CPC_NIAGARA2_PCR_OV0_SHIFT	18
-#define	CPC_NIAGARA2_PCR_OV1_SHIFT	30
-#define	CPC_NIAGARA2_PCR_OV0_MASK	UINT64_C(0x40000)
-#define	CPC_NIAGARA2_PCR_OV1_MASK	UINT64_C(0x80000000)
+#if defined(KT_IMPL)
+
+#define	CPC_PCR_SAMPLE_MODE_SHIFT	32
+#define	CPC_PCR_SAMPLE_MODE_MASK	(1ull << CPC_PCR_SAMPLE_MODE_SHIFT)
 
-#define	CPC_NIAGARA2_PCR_HOLDOV0_SHIFT  62
-#define	CPC_NIAGARA2_PCR_HOLDOV1_SHIFT  63
-#define	CPC_NIAGARA2_PCR_HOLDOV0	(1ull << CPC_NIAGARA2_PCR_HOLDOV0_SHIFT)
-#define	CPC_NIAGARA2_PCR_HOLDOV1	(1ull << CPC_NIAGARA2_PCR_HOLDOV1_SHIFT)
+#endif
+
+#define	CPC_PCR_HOLDOV0_SHIFT	62
+#define	CPC_PCR_HOLDOV1_SHIFT	63
+#define	CPC_PCR_HOLDOV0		(1ull << CPC_PCR_HOLDOV0_SHIFT)
+#define	CPC_PCR_HOLDOV1		(1ull << CPC_PCR_HOLDOV1_SHIFT)
 
 /*
  * Hypervisor FAST_TRAP API function numbers to get/set DRAM
@@ -98,32 +113,56 @@
 #define	HV_VFALLS_SETPERF		0x107
 
 /*
- * Niagara2 DRAM performance counters
+ * Hypervisor FAST_TRAP API function numbers to get/set DRAM
+ * performance counters for KT
  */
-#define	NIAGARA_DRAM_PIC0_SEL_SHIFT	0x4
-#define	NIAGARA_DRAM_PIC1_SEL_SHIFT	0x0
+#define	HV_KT_GETPERF			0x122
+#define	HV_KT_SETPERF			0x123
+
+#if defined(KT_IMPL)
+
+/*
+ * KT DRAM performance counters
+ */
+#define	DRAM_PIC0_SEL_SHIFT	0x0
+#define	DRAM_PIC1_SEL_SHIFT	0x4
 
-#define	NIAGARA_DRAM_PIC0_SHIFT		0x20
-#define	NIAGARA_DRAM_PIC0_MASK		0x7fffffff
-#define	NIAGARA_DRAM_PIC1_SHIFT		0x0
-#define	NIAGARA_DRAM_PIC1_MASK		0x7fffffff
+#define	DRAM_PIC0_SHIFT		0x0
+#define	DRAM_PIC0_MASK		0x7fffffff
+#define	DRAM_PIC1_SHIFT		0x20
+#define	DRAM_PIC1_MASK		0x7fffffff
+
+#else
+
+/*
+ * Niagara2 and VF DRAM performance counters
+ */
+#define	DRAM_PIC0_SEL_SHIFT	0x4
+#define	DRAM_PIC1_SEL_SHIFT	0x0
+
+#define	DRAM_PIC0_SHIFT		0x20
+#define	DRAM_PIC0_MASK		0x7fffffff
+#define	DRAM_PIC1_SHIFT		0x0
+#define	DRAM_PIC1_MASK		0x7fffffff
+
+#endif
 
 #if defined(NIAGARA2_IMPL)
 /*
  * SPARC/DRAM performance counter register numbers for HV_NIAGARA2_GETPERF
  * and HV_NIAGARA2_SETPERF for Niagara2
  */
-#define	NIAGARA_DRAM_BANKS		0x4
+#define	DRAM_BANKS		0x4
 
-#define	HV_NIAGARA_SPARC_CTL		0x0
-#define	HV_NIAGARA_DRAM_CTL0		0x1
-#define	HV_NIAGARA_DRAM_COUNT0		0x2
-#define	HV_NIAGARA_DRAM_CTL1		0x3
-#define	HV_NIAGARA_DRAM_COUNT1		0x4
-#define	HV_NIAGARA_DRAM_CTL2		0x5
-#define	HV_NIAGARA_DRAM_COUNT2		0x6
-#define	HV_NIAGARA_DRAM_CTL3		0x7
-#define	HV_NIAGARA_DRAM_COUNT3		0x8
+#define	HV_SPARC_CTL		0x0
+#define	HV_DRAM_CTL0		0x1
+#define	HV_DRAM_COUNT0		0x2
+#define	HV_DRAM_CTL1		0x3
+#define	HV_DRAM_COUNT1		0x4
+#define	HV_DRAM_CTL2		0x5
+#define	HV_DRAM_COUNT2		0x6
+#define	HV_DRAM_CTL3		0x7
+#define	HV_DRAM_COUNT3		0x8
 
 #elif defined(VFALLS_IMPL)
 /*
@@ -131,26 +170,68 @@
  * and HV_VFALLS_SETPERF for Victoria Falls
  * Support for 4-node configuration
  */
-#define	NIAGARA_DRAM_BANKS		0x8
+#define	DRAM_BANKS		0x8
+
+#define	HV_SPARC_CTL		0x0
+#define	HV_L2_CTL		0x1
+#define	HV_DRAM_CTL0		0x2
+#define	HV_DRAM_COUNT0		0x3
+#define	HV_DRAM_CTL1		0x4
+#define	HV_DRAM_COUNT1		0x5
+#define	HV_DRAM_CTL2		0x6
+#define	HV_DRAM_COUNT2		0x7
+#define	HV_DRAM_CTL3		0x8
+#define	HV_DRAM_COUNT3		0x9
+#define	HV_DRAM_CTL4		0xa
+#define	HV_DRAM_COUNT4		0xb
+#define	HV_DRAM_CTL5		0xc
+#define	HV_DRAM_COUNT5		0xd
+#define	HV_DRAM_CTL6		0xe
+#define	HV_DRAM_COUNT6		0xf
+#define	HV_DRAM_CTL7		0x10
+#define	HV_DRAM_COUNT7		0x11
+
+#define	L2_CTL_MASK		0x3
+#define	SL3_MASK		0x300
+
+#elif defined(KT_IMPL)
+/*
+ * SPARC/DRAM performance counter register numbers for HV_KT_GETPERF
+ * and HV_KT_SETPERF for KT
+ * Support for 4-node configuration
+ */
 
-#define	HV_NIAGARA_SPARC_CTL		0x0
-#define	HV_NIAGARA_L2_CTL		0x1
-#define	HV_NIAGARA_DRAM_CTL0		0x2
-#define	HV_NIAGARA_DRAM_COUNT0		0x3
-#define	HV_NIAGARA_DRAM_CTL1		0x4
-#define	HV_NIAGARA_DRAM_COUNT1		0x5
-#define	HV_NIAGARA_DRAM_CTL2		0x6
-#define	HV_NIAGARA_DRAM_COUNT2		0x7
-#define	HV_NIAGARA_DRAM_CTL3		0x8
-#define	HV_NIAGARA_DRAM_COUNT3		0x9
-#define	HV_NIAGARA_DRAM_CTL4		0xa
-#define	HV_NIAGARA_DRAM_COUNT4		0xb
-#define	HV_NIAGARA_DRAM_CTL5		0xc
-#define	HV_NIAGARA_DRAM_COUNT5		0xd
-#define	HV_NIAGARA_DRAM_CTL6		0xe
-#define	HV_NIAGARA_DRAM_COUNT6		0xf
-#define	HV_NIAGARA_DRAM_CTL7		0x10
-#define	HV_NIAGARA_DRAM_COUNT7		0x11
+#define	DRAM_BANKS		0x8
+
+#define	HV_SPARC_CTL		0x0
+#define	HV_L2_CTL		0x1
+#define	HV_DRAM_CTL0		0x2
+#define	HV_DRAM_COUNT0		0x3
+#define	HV_DRAM_CTL1		0x5
+#define	HV_DRAM_COUNT1		0x6
+#define	HV_DRAM_CTL2		0x8
+#define	HV_DRAM_COUNT2		0x9
+#define	HV_DRAM_CTL3		0xb
+#define	HV_DRAM_COUNT3		0xc
+#define	HV_DRAM_CTL4		0xe
+#define	HV_DRAM_COUNT4		0xf
+#define	HV_DRAM_CTL5		0x11
+#define	HV_DRAM_COUNT5		0x12
+#define	HV_DRAM_CTL6		0x14
+#define	HV_DRAM_COUNT6		0x15
+#define	HV_DRAM_CTL7		0x17
+#define	HV_DRAM_COUNT7		0x18
+
+#define	L2_CTL_MASK		0x3
+#define	SL3_MASK		0x300
+
+#endif
+
+#ifdef VFALLS_IMPL
+/*
+ * Performance counters for Zambezi.  Zambezi is only supported with
+ * Victoria Falls (UltraSPARC-T2+).
+ */
 
 #define	ZAMBEZI_PIC0_SEL_SHIFT		0x0
 #define	ZAMBEZI_PIC1_SEL_SHIFT		0x8
@@ -235,9 +316,6 @@
 #define	HV_ZAM3_ASU_PIC0		0x58
 #define	HV_ZAM3_ASU_PIC1		0x59
 
-#define	VFALLS_L2_CTL_MASK		0x3
-#define	VFALLS_SL3_MASK			0x300
-
 #endif
 
 #ifndef _ASM
--- a/usr/src/uts/sun4v/sys/niagaraasi.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/niagaraasi.h	Fri Dec 11 10:41:17 2009 -0800
@@ -19,15 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _SYS_NIAGARAASI_H
 #define	_SYS_NIAGARAASI_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 /*
  * alternate address space identifiers
  *
@@ -40,18 +38,12 @@
 extern "C" {
 #endif
 
-#if defined(NIAGARA_IMPL) || defined(NIAGARA2_IMPL) || defined(VFALLS_IMPL)
-
 /*
- * NIAGARA, NIAGARA2 and Victoria Falls specific ASIs
+ * NIAGARA, NIAGARA2, Victoria Falls and KT specific ASIs
  */
 #define	ASI_BLK_INIT_QUAD_LDD_AIUS	0x23	/* block as if user secondary */
 #define	ASI_BLK_INIT_ST_QUAD_LDD_P	0xE2	/* block initializing primary */
 
-#else
-#error	"This file has ASIs specific to Niagara, Niagara2 and VFalls CPUs"
-#endif	/* NIAGARA_IMPL */
-
 #ifdef __cplusplus
 }
 #endif
--- a/usr/src/uts/sun4v/sys/niagararegs.h	Thu Dec 10 20:51:16 2009 -0800
+++ b/usr/src/uts/sun4v/sys/niagararegs.h	Fri Dec 11 10:41:17 2009 -0800
@@ -20,15 +20,13 @@
  */
 
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _SYS_NIAGARAREGS_H
 #define	_SYS_NIAGARAREGS_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,30 +41,30 @@
  * Niagara SPARC Performance Control Register
  */
 
-#define	CPC_NIAGARA_PCR_PRIVPIC		0
-#define	CPC_NIAGARA_PCR_SYS		1
-#define	CPC_NIAGARA_PCR_USR		2
+#define	CPC_PCR_PRIVPIC		0
+#define	CPC_PCR_SYS		1
+#define	CPC_PCR_USR		2
 
-#define	CPC_NIAGARA_PCR_PIC0_SHIFT	4
-#define	CPC_NIAGARA_PCR_PIC1_SHIFT	0
-#define	CPC_NIAGARA_PCR_PIC0_MASK	UINT64_C(0x7)
-#define	CPC_NIAGARA_PCR_PIC1_MASK	UINT64_C(0)
+#define	CPC_PCR_PIC0_SHIFT	4
+#define	CPC_PCR_PIC1_SHIFT	0
+#define	CPC_PCR_PIC0_MASK	UINT64_C(0x7)
+#define	CPC_PCR_PIC1_MASK	UINT64_C(0)
 
-#define	CPC_NIAGARA_PCR_OVF_MASK	UINT64_C(0x300)
-#define	CPC_NIAGARA_PCR_OVF_SHIFT	8
+#define	CPC_PCR_OVF_MASK	UINT64_C(0x300)
+#define	CPC_PCR_OVF_SHIFT	8
 
 /*
  * Niagara DRAM performance counters
  */
-#define	NIAGARA_DRAM_BANKS		0x4
+#define	DRAM_BANKS		0x4
 
-#define	NIAGARA_DRAM_PIC0_SEL_SHIFT	0x4
-#define	NIAGARA_DRAM_PIC1_SEL_SHIFT	0x0
+#define	DRAM_PIC0_SEL_SHIFT	0x4
+#define	DRAM_PIC1_SEL_SHIFT	0x0
 
-#define	NIAGARA_DRAM_PIC0_SHIFT		0x20
-#define	NIAGARA_DRAM_PIC0_MASK		0x7fffffff
-#define	NIAGARA_DRAM_PIC1_SHIFT		0x0
-#define	NIAGARA_DRAM_PIC1_MASK		0x7fffffff
+#define	DRAM_PIC0_SHIFT		0x20
+#define	DRAM_PIC0_MASK		0x7fffffff
+#define	DRAM_PIC1_SHIFT		0x0
+#define	DRAM_PIC1_MASK		0x7fffffff
 
 /*
  * Niagara JBUS performance counters
@@ -99,14 +97,14 @@
  */
 #define	HV_NIAGARA_JBUS_CTL		0x0
 #define	HV_NIAGARA_JBUS_COUNT		0x1
-#define	HV_NIAGARA_DRAM_CTL0		0x2
-#define	HV_NIAGARA_DRAM_COUNT0		0x3
-#define	HV_NIAGARA_DRAM_CTL1		0x4
-#define	HV_NIAGARA_DRAM_COUNT1		0x5
-#define	HV_NIAGARA_DRAM_CTL2		0x6
-#define	HV_NIAGARA_DRAM_COUNT2		0x7
-#define	HV_NIAGARA_DRAM_CTL3		0x8
-#define	HV_NIAGARA_DRAM_COUNT3		0x9
+#define	HV_DRAM_CTL0		0x2
+#define	HV_DRAM_COUNT0		0x3
+#define	HV_DRAM_CTL1		0x4
+#define	HV_DRAM_COUNT1		0x5
+#define	HV_DRAM_CTL2		0x6
+#define	HV_DRAM_COUNT2		0x7
+#define	HV_DRAM_CTL3		0x8
+#define	HV_DRAM_COUNT3		0x9
 
 #ifndef _ASM